/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>
#include "hns_roce_debugfs.h"

#define PCI_REVISION_ID_HIP08			0x21
#define PCI_REVISION_ID_HIP09			0x30

#define HNS_ROCE_MAX_MSG_LEN			0x80000000

#define HNS_ROCE_IB_MIN_SQ_STRIDE		6

#define HNS_ROCE_MIN_CQE_NUM			0x40
#define HNS_ROCE_MIN_SRQ_WQE_NUM		1

#define HNS_ROCE_MAX_IRQ_NUM			128

#define HNS_ROCE_SGE_IN_WQE			2
#define HNS_ROCE_SGE_SHIFT			4

#define HNS_ROCE_CEQ				0
#define HNS_ROCE_AEQ				1

#define HNS_ROCE_CEQE_SIZE			0x4
#define HNS_ROCE_AEQE_SIZE			0x10

#define HNS_ROCE_V3_EQE_SIZE			0x40

#define HNS_ROCE_V2_CQE_SIZE			32
#define HNS_ROCE_V3_CQE_SIZE			64

#define HNS_ROCE_V2_QPC_SZ			256
#define HNS_ROCE_V3_QPC_SZ			512

#define HNS_ROCE_MAX_PORTS			6
#define HNS_ROCE_GID_SIZE			16
#define HNS_ROCE_SGE_SIZE			16
#define HNS_ROCE_DWQE_SIZE			65536

#define HNS_ROCE_HOP_NUM_0			0xff

#define MR_TYPE_MR				0x00
#define MR_TYPE_FRMR				0x01
#define MR_TYPE_DMA				0x03

#define HNS_ROCE_FRMR_MAX_PA			512

#define PKEY_ID					0xffff
#define NODE_DESC_SIZE				64
#define DB_REG_OFFSET				0x1000

/* Page shift offset reported to HW when PAGE_SIZE is larger than 4KB */
#define PG_SHIFT_OFFSET				(PAGE_SHIFT - 12)
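
/*
 * Worked example (illustrative): on a kernel built with 16KB pages,
 * PAGE_SHIFT is 14, so PG_SHIFT_OFFSET is 14 - 12 = 2, i.e. each kernel
 * page spans 2^2 = 4 of the hardware's 4KB pages.
 */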

#define HNS_ROCE_IDX_QUE_ENTRY_SZ		4
#define SRQ_DB_REG				0x230

#define HNS_ROCE_QP_BANK_NUM			8
#define HNS_ROCE_CQ_BANK_NUM			4

#define CQ_BANKID_SHIFT				2
#define CQ_BANKID_MASK				GENMASK(1, 0)
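
/*
 * A minimal sketch (assumption, not part of this header) of how a CQN
 * maps to one of the HNS_ROCE_CQ_BANK_NUM banks: the low bits select the
 * bank, the remaining bits index within it.
 *
 *	u8 bankid = cqn & CQ_BANKID_MASK;	  (bank 0..3)
 *	u32 idx_in_bank = cqn >> CQ_BANKID_SHIFT;
 */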

enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST = 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED = 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH = 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH = 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR = 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR = 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW = 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID = 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE = 0x0f,
	/* 0x10 and 0x11 are currently unused */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
	HNS_ROCE_EVENT_TYPE_MB = 0x13,
	HNS_ROCE_EVENT_TYPE_FLR = 0x15,
	HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION = 0x16,
	HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17,
};

enum hns_roce_cap_flags {
	HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
	HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
	HNS_ROCE_CAP_FLAG_CQ_RECORD_DB = BIT(3),
	HNS_ROCE_CAP_FLAG_QP_RECORD_DB = BIT(4),
	HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
	HNS_ROCE_CAP_FLAG_XRC = BIT(6),
	HNS_ROCE_CAP_FLAG_MW = BIT(7),
	HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
	HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
	HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
	HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
	HNS_ROCE_CAP_FLAG_STASH = BIT(17),
	HNS_ROCE_CAP_FLAG_CQE_INLINE = BIT(19),
	HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB = BIT(22),
};
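
/*
 * Illustrative check (assumes the capability mask is exposed through
 * hr_dev->caps.flags, a field this excerpt elides):
 *
 *	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB)
 *		set up a record doorbell for the SRQ;
 */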

#define HNS_ROCE_DB_TYPE_COUNT			2
#define HNS_ROCE_DB_UNIT_SIZE			4

enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};

enum hns_roce_reset_stage {
	HNS_ROCE_STATE_NON_RST,
	HNS_ROCE_STATE_RST_BEF_DOWN,
	HNS_ROCE_STATE_RST_DOWN,
	HNS_ROCE_STATE_RST_UNINIT,
	HNS_ROCE_STATE_RST_INIT,
	HNS_ROCE_STATE_RST_INITED,
};

enum hns_roce_instance_state {
	HNS_ROCE_STATE_NON_INIT,
	HNS_ROCE_STATE_INIT,
	HNS_ROCE_STATE_INITED,
	HNS_ROCE_STATE_UNINIT,
};

enum {
	HNS_ROCE_RST_DIRECT_RETURN = 0,
};

#define HNS_ROCE_CMD_SUCCESS			1

#define HNS_ROCE_MAX_HOP_NUM			3
/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT			12
#define HNS_HW_PAGE_SIZE			(1 << HNS_HW_PAGE_SHIFT)
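
/*
 * Illustrative arithmetic (not from the driver): with HNS_HW_PAGE_SHIFT
 * of 12, HNS_HW_PAGE_SIZE is 1 << 12 = 4096 bytes, so one 16KB kernel
 * page holds exactly four hardware pages.
 */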

struct hns_roce_uar {
	unsigned long logic_idx;
};

enum hns_roce_mmap_type {
	HNS_ROCE_MMAP_TYPE_DB = 1,
	HNS_ROCE_MMAP_TYPE_DWQE,
};

struct hns_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	enum hns_roce_mmap_type mmap_type;
};

struct hns_roce_ucontext {
	struct ib_ucontext ibucontext;
	struct hns_roce_uar uar;
	struct list_head page_list;
	struct mutex page_mutex;
	struct hns_user_mmap_entry *db_mmap_entry;
};

struct hns_roce_xrcd {
	struct ib_xrcd ibxrcd;
};

struct hns_roce_bitmap {
	/* The last bit found set during bitmap traversal */
	unsigned long last;
	unsigned long reserved_top;
	unsigned long *table;
};

struct hns_roce_ida {
	struct ida ida;
	u32 min; /* Lowest ID to allocate. */
	u32 max; /* Highest ID to allocate. */
};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	u32 type;
	/* HEM array element num */
	unsigned long num_hem;
	/* Single obj size */
	unsigned long obj_size;
	unsigned long table_chunk_size;
	struct hns_roce_hem **hem;
	dma_addr_t *bt_l1_dma_addr;
	dma_addr_t *bt_l0_dma_addr;
};

struct hns_roce_buf_region {
	u32 offset; /* page offset */
	u32 count; /* page count */
	int hopnum; /* addressing hop num */
};

#define HNS_ROCE_MAX_BT_REGION			3
#define HNS_ROCE_MAX_BT_LEVEL			3
struct hns_roce_hem_list {
	struct list_head root_bt;
	/* link all BT DMA memory by hop config */
	struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
	struct list_head btm_bt; /* link all bottom-level BTs in @mid_bt */
	dma_addr_t root_ba; /* pointer to the root ba table */
};

struct hns_roce_buf_attr {
	struct {
		size_t size; /* region size */
		int hopnum; /* multi-hop addressing hop num */
	} region[HNS_ROCE_MAX_BT_REGION];
	unsigned int region_count; /* valid region count */
	unsigned int page_shift; /* buffer page shift */
	unsigned int user_access; /* umem access flag */
	bool mtt_only; /* only allocate the buffer-required MTT memory */
	bool adaptive; /* adapt page_shift and hopnum automatically */
};

struct hns_roce_hem_cfg {
	dma_addr_t root_ba; /* root BA table's address */
	bool is_direct; /* addressing without BA table */
	unsigned int ba_pg_shift; /* BA table page shift */
	unsigned int buf_pg_shift; /* buffer page shift */
	unsigned int buf_pg_count; /* buffer page count */
	struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
	unsigned int region_count;
};

/* memory translation region */
struct hns_roce_mtr {
	struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
	struct ib_umem *umem; /* user-space buffer */
	struct hns_roce_buf *kmem; /* kernel-space buffer */
	struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */
};

struct hns_roce_mw {
	struct ib_mw ibmw;
	int enabled; /* MW's active status */
};

struct hns_roce_mr {
	struct ib_mr ibmr;
	u64 iova; /* MR's original virtual addr */
	u64 size; /* Address range of MR */
	u32 key; /* Key of MR */
	u32 pd; /* PD num of MR */
	u32 access; /* Access permission of MR */
	int enabled; /* MR's active status */
	int type; /* MR's register type */
	u32 pbl_hop_num; /* multi-hop number */
	struct hns_roce_mtr pbl_mtr;
	dma_addr_t *page_list;
};

struct hns_roce_mr_table {
	struct hns_roce_ida mtpt_ida;
	struct hns_roce_hem_table mtpt_table;
};

struct hns_roce_wq {
	u64 *wrid; /* Work request ID */
	u32 wqe_cnt; /* WQE num */
	u32 wqe_shift; /* WQE size */
	void __iomem *db_reg;
};

struct hns_roce_sge {
	unsigned int sge_cnt; /* SGE num */
	u32 sge_shift; /* SGE size */
};

struct hns_roce_buf_list {
	void *buf;
	dma_addr_t map;
};

/*
 * %HNS_ROCE_BUF_DIRECT indicates that all memory must be in contiguous
 * blocks that are mapped only once.
 *
 * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
 *
 * %HNS_ROCE_BUF_NOFAIL indicates that the allocation fails only when the
 * allocated size is zero, even if that size is smaller than requested.
 */
enum {
	HNS_ROCE_BUF_DIRECT = BIT(0),
	HNS_ROCE_BUF_NOSLEEP = BIT(1),
	HNS_ROCE_BUF_NOFAIL = BIT(2),
};
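
/*
 * Usage sketch (illustrative; hns_roce_buf_alloc() is declared later in
 * this header): allocate from a context that cannot sleep and must not
 * fail outright on a partial allocation:
 *
 *	buf = hns_roce_buf_alloc(hr_dev, size, PAGE_SHIFT,
 *				 HNS_ROCE_BUF_NOSLEEP | HNS_ROCE_BUF_NOFAIL);
 */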

struct hns_roce_buf {
	struct hns_roce_buf_list *trunk_list;
	unsigned int trunk_shift;
	unsigned int page_shift;
};

struct hns_roce_db_pgdir {
	struct list_head list;
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	unsigned long *bits[HNS_ROCE_DB_TYPE_COUNT];
};

struct hns_roce_user_db_page {
	struct list_head list;
	struct ib_umem *umem;
	unsigned long user_virt;
};

struct hns_roce_db {
	union {
		struct hns_roce_db_pgdir *pgdir;
		struct hns_roce_user_db_page *user_page;
	} u;
};

struct hns_roce_cq {
	struct ib_cq ib_cq;
	struct hns_roce_mtr mtr;
	struct hns_roce_db db;
	void __iomem *db_reg;
	struct completion free;
	struct list_head sq_list; /* all qps on this send cq */
	struct list_head rq_list; /* all qps on this recv cq */
	int is_armed; /* cq is armed */
	struct list_head node; /* all armed cqs are on a list */
};

struct hns_roce_idx_que {
	struct hns_roce_mtr mtr;
	unsigned long *bitmap;
};

struct hns_roce_srq {
	struct ib_srq ibsrq;
	void __iomem *db_reg;
	struct completion free;
	struct hns_roce_mtr buf_mtr;
	struct hns_roce_idx_que idx_que;
	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
	struct hns_roce_db rdb;
};

struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};

struct hns_roce_bank {
	struct ida ida;
	u32 inuse; /* Number of IDs allocated */
	u32 min; /* Lowest ID to allocate. */
	u32 max; /* Highest ID to allocate. */
	u32 next; /* Next ID to allocate. */
};

struct hns_roce_idx_table {
	u32 *spare_idx;
	u32 head;
	u32 tail;
};

struct hns_roce_qp_table {
	struct hns_roce_hem_table qp_table;
	struct hns_roce_hem_table irrl_table;
	struct hns_roce_hem_table trrl_table;
	struct hns_roce_hem_table sccc_table;
	struct mutex scc_mutex;
	struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
	struct mutex bank_mutex;
	struct hns_roce_idx_table idx_table;
};

struct hns_roce_cq_table {
	struct hns_roce_hem_table table;
	struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
	struct mutex bank_mutex;
};

struct hns_roce_srq_table {
	struct hns_roce_ida srq_ida;
	struct hns_roce_hem_table table;
};

struct hns_roce_av {
	u8 dgid[HNS_ROCE_GID_SIZE];
};

struct hns_roce_ah {
	struct ib_ah ibah;
	struct hns_roce_av av;
};

struct hns_roce_cmd_context {
	struct completion done;
};

enum hns_roce_cmdq_state {
	HNS_ROCE_CMDQ_STATE_NORMAL,
	HNS_ROCE_CMDQ_STATE_FATAL_ERR,
};

struct hns_roce_cmdq {
	struct dma_pool *pool;
	struct semaphore poll_sem;
	/*
	 * Event mode: the cmd registers are protected so that outstanding
	 * commands never exceed max_cmds or the user-visible limit region.
	 */
	struct semaphore event_sem;
	spinlock_t context_lock;
	struct hns_roce_cmd_context *context;
	/*
	 * Whether to use event mode. Once the cmd event queue is ready,
	 * the driver can switch into event mode; on device close it
	 * switches back into poll (non-event) mode.
	 */
	u8 use_events;
	enum hns_roce_cmdq_state state;
};

struct hns_roce_cmd_mailbox {
	void *buf;
	dma_addr_t dma;
};

struct hns_roce_mbox_msg {
	u64 in_param;
	u64 out_param;
	u8 cmd;
	u32 tag;
	u16 token;
	u8 event_en;
};

enum {
	HNS_ROCE_FLUSH_FLAG = 0,
};

struct hns_roce_work {
	struct hns_roce_dev *hr_dev;
	struct work_struct work;
};

enum hns_roce_cong_type {
	CONG_TYPE_DCQCN,
	CONG_TYPE_LDCP,
	CONG_TYPE_HC3,
	CONG_TYPE_DIP,
};

struct hns_roce_qp {
	struct ib_qp ibqp;
	struct hns_roce_wq rq;
	struct hns_roce_db rdb;
	struct hns_roce_db sdb;
	unsigned long en_flags;
	enum ib_sig_type sq_signal_bits;
	struct hns_roce_wq sq;
	struct hns_roce_mtr mtr;
	void (*event)(struct hns_roce_qp *qp,
		      enum hns_roce_event event_type);
	struct completion free;
	struct hns_roce_sge sge;
	enum ib_mtu path_mtu;
	/* 0: flush needed, 1: unneeded */
	unsigned long flush_flag;
	struct hns_roce_work flush_work;
	struct list_head node; /* all qps are on a list */
	struct list_head rq_node; /* all recv qps are on a list */
	struct list_head sq_node; /* all send qps are on a list */
	struct hns_user_mmap_entry *dwqe_mmap_entry;
	enum hns_roce_cong_type cong_type;
};

struct hns_roce_ib_iboe {
	struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block nb;
	u8 phy_port[HNS_ROCE_MAX_PORTS];
};

struct hns_roce_ceqe {
	__le32 comp;
};

#define CEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ceqe, h, l)

#define CEQE_CQN CEQE_FIELD_LOC(23, 0)
#define CEQE_OWNER CEQE_FIELD_LOC(31, 31)

struct hns_roce_aeqe {
	__le32 asyn;
};

#define AEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_aeqe, h, l)

#define AEQE_EVENT_TYPE AEQE_FIELD_LOC(7, 0)
#define AEQE_SUB_TYPE AEQE_FIELD_LOC(15, 8)
#define AEQE_OWNER AEQE_FIELD_LOC(31, 31)
#define AEQE_EVENT_QUEUE_NUM AEQE_FIELD_LOC(55, 32)
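
/*
 * Decoding sketch (illustrative; hr_reg_read() comes from
 * hns_roce_common.h, which this header does not include):
 *
 *	int event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE);
 *	int sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE);
 */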

struct hns_roce_eq {
	struct hns_roce_dev *hr_dev;
	void __iomem *db_reg;
	int type_flag; /* AEQ: 1, CEQ: 0 */
	struct hns_roce_mtr mtr;
};

struct hns_roce_eq_table {
	struct hns_roce_eq *eq;
};

struct hns_roce_caps {
	int gid_table_len[HNS_ROCE_MAX_PORTS];
	int pkey_table_len[HNS_ROCE_MAX_PORTS];
	int local_ca_ack_delay;
	int max_qp_init_rdma;
	int max_qp_dest_rdma;
	int num_comp_vectors;
	int num_other_vectors;
	int qpc_timer_entry_sz;
	int cqc_timer_entry_sz;
	u32 qpc_timer_bt_num;
	u32 cqc_timer_bt_num;
	u32 qpc_timer_ba_pg_sz;
	u32 qpc_timer_buf_pg_sz;
	u32 qpc_timer_hop_num;
	u32 cqc_timer_ba_pg_sz;
	u32 cqc_timer_buf_pg_sz;
	u32 cqc_timer_hop_num;
	u32 cqe_ba_pg_sz; /* page_size = 4K * (2 ^ cqe_ba_pg_sz) */
	u32 srqwqe_buf_pg_sz;
	u32 chunk_sz; /* chunk size in non-multihop mode */
	u16 default_ceq_max_cnt;
	u16 default_ceq_period;
	u16 default_aeq_max_cnt;
	u16 default_aeq_period;
	u16 default_aeq_arm_st;
	u16 default_ceq_arm_st;
	enum hns_roce_cong_type default_cong_type;
};
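
/*
 * Worked example (illustrative) of the *_pg_sz encoding above: with
 * cqe_ba_pg_sz = 2, the CQE BA table page size is 4K * (2 ^ 2) = 16KB.
 */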

enum hns_roce_device_state {
	HNS_ROCE_DEVICE_STATE_INITED,
	HNS_ROCE_DEVICE_STATE_RST_DOWN,
	HNS_ROCE_DEVICE_STATE_UNINIT,
};

enum hns_roce_hw_pkt_stat_index {
	HNS_ROCE_HW_RX_RC_PKT_CNT,
	HNS_ROCE_HW_RX_UC_PKT_CNT,
	HNS_ROCE_HW_RX_UD_PKT_CNT,
	HNS_ROCE_HW_RX_XRC_PKT_CNT,
	HNS_ROCE_HW_RX_PKT_CNT,
	HNS_ROCE_HW_RX_ERR_PKT_CNT,
	HNS_ROCE_HW_RX_CNP_PKT_CNT,
	HNS_ROCE_HW_TX_RC_PKT_CNT,
	HNS_ROCE_HW_TX_UC_PKT_CNT,
	HNS_ROCE_HW_TX_UD_PKT_CNT,
	HNS_ROCE_HW_TX_XRC_PKT_CNT,
	HNS_ROCE_HW_TX_PKT_CNT,
	HNS_ROCE_HW_TX_ERR_PKT_CNT,
	HNS_ROCE_HW_TX_CNP_PKT_CNT,
	HNS_ROCE_HW_TRP_GET_MPT_ERR_PKT_CNT,
	HNS_ROCE_HW_TRP_GET_IRRL_ERR_PKT_CNT,
	HNS_ROCE_HW_ECN_DB_CNT,
	HNS_ROCE_HW_RX_BUF_CNT,
	HNS_ROCE_HW_TRP_RX_SOF_CNT,
	HNS_ROCE_HW_CQ_CQE_CNT,
	HNS_ROCE_HW_CQ_POE_CNT,
	HNS_ROCE_HW_CQ_NOTIFY_CNT,
	HNS_ROCE_HW_CNT_TOTAL
};

enum hns_roce_sw_dfx_stat_index {
	HNS_ROCE_DFX_AEQE_CNT,
	HNS_ROCE_DFX_CEQE_CNT,
	HNS_ROCE_DFX_CMDS_CNT,
	HNS_ROCE_DFX_CMDS_ERR_CNT,
	HNS_ROCE_DFX_MBX_POSTED_CNT,
	HNS_ROCE_DFX_MBX_POLLED_CNT,
	HNS_ROCE_DFX_MBX_EVENT_CNT,
	HNS_ROCE_DFX_QP_CREATE_ERR_CNT,
	HNS_ROCE_DFX_QP_MODIFY_ERR_CNT,
	HNS_ROCE_DFX_CQ_CREATE_ERR_CNT,
	HNS_ROCE_DFX_CQ_MODIFY_ERR_CNT,
	HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT,
	HNS_ROCE_DFX_SRQ_MODIFY_ERR_CNT,
	HNS_ROCE_DFX_XRCD_ALLOC_ERR_CNT,
	HNS_ROCE_DFX_MR_REG_ERR_CNT,
	HNS_ROCE_DFX_MR_REREG_ERR_CNT,
	HNS_ROCE_DFX_AH_CREATE_ERR_CNT,
	HNS_ROCE_DFX_MMAP_ERR_CNT,
	HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT,
	HNS_ROCE_DFX_CNT_TOTAL
};

struct hns_roce_hw {
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	int (*post_mbox)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_mbox_msg *mbox_msg);
	int (*poll_mbox_done)(struct hns_roce_dev *hr_dev);
	bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
	int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
		       const union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
		       const u8 *addr);
	int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			  struct hns_roce_mr *mr);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags,
				void *mb_buf);
	int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			       struct hns_roce_mr *mr);
	int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, u32 step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 u32 step_idx);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state, struct ib_udata *udata);
	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
				    struct hns_roce_qp *hr_qp);
	void (*dereg_mr)(struct hns_roce_dev *hr_dev);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
	int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
	int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
	int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
	int (*query_srqc)(struct hns_roce_dev *hr_dev, u32 srqn, void *buffer);
	int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
	int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
				u64 *stats, u32 port, int *hw_counters);
	const struct ib_device_ops *hns_roce_dev_ops;
	const struct ib_device_ops *hns_roce_dev_srq_ops;
};
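
/*
 * A hedged sketch of how a hardware backend wires itself up (names are
 * hypothetical; the real v2 backend lives in hns_roce_hw_v2.c):
 *
 *	static const struct hns_roce_hw my_hw = {
 *		.cmq_init = my_cmq_init,
 *		.hw_init = my_hw_init,
 *		.post_mbox = my_post_mbox,
 *	};
 *
 *	hr_dev->hw = &my_hw;	followed by hns_roce_init(hr_dev);
 */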

struct hns_roce_dev {
	struct ib_device ib_dev;
	struct pci_dev *pci_dev;
	struct hns_roce_uar priv_uar;
	const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
	unsigned long reset_cnt;
	struct hns_roce_ib_iboe iboe;
	enum hns_roce_device_state state;
	struct list_head qp_list; /* list of all qps on this dev */
	spinlock_t qp_list_lock; /* protect qp_list */
	struct list_head dip_list; /* list of all dest ips on this dev */
	spinlock_t dip_list_lock; /* protect dip_list */
	struct list_head pgdir_list;
	struct mutex pgdir_mutex;
	int irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem *reg_base;
	void __iomem *mem_base;
	struct hns_roce_caps caps;
	struct xarray qp_table_xa;
	unsigned char dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
	void __iomem *priv_addr;
	struct hns_roce_cmdq cmd;
	struct hns_roce_ida pd_ida;
	struct hns_roce_ida xrcd_ida;
	struct hns_roce_ida uar_ida;
	struct hns_roce_mr_table mr_table;
	struct hns_roce_cq_table cq_table;
	struct hns_roce_srq_table srq_table;
	struct hns_roce_qp_table qp_table;
	struct hns_roce_eq_table eq_table;
	struct hns_roce_hem_table qpc_timer_table;
	struct hns_roce_hem_table cqc_timer_table;
	/* GMV is the memory area that the driver allocates for the hardware
	 * to store SGID, SMAC and VLAN information.
	 */
	struct hns_roce_hem_table gmv_table;
	const struct hns_roce_hw *hw;
	struct workqueue_struct *irq_workq;
	struct work_struct ecc_work;
	u32 cong_algo_tmpl_id;
	struct hns_roce_dev_debugfs dbgfs;
	atomic64_t *dfx_cnt;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
		*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct hns_roce_mw, ibmw);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_user_mmap_entry *
to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
}

static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
	writeq(*(u64 *)val, dest);
}
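
/*
 * Usage sketch (illustrative only; the offset shown is the generic
 * DB_REG_OFFSET from above, not a specific queue's doorbell):
 *
 *	__le32 db[2] = { cpu_to_le32(low), cpu_to_le32(high) };
 *
 *	hns_roce_write64_k(db, hr_dev->reg_base + DB_REG_OFFSET);
 *
 * Both words are posted as a single 64-bit MMIO write.
 */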

static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return xa_load(&hr_dev->qp_table_xa, qpn);
}

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
					unsigned int offset)
{
	return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
			(offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
					       unsigned int offset)
{
	return buf->trunk_list[offset >> buf->trunk_shift].map +
	       (offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
{
	return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
}
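
/*
 * Worked example (illustrative): with trunk_shift = 16 (64KB trunks), a
 * byte offset of 0x12345 resolves to trunk 0x12345 >> 16 = 1 at offset
 * 0x12345 & 0xffff = 0x2345 within that trunk; hns_roce_buf_page(buf, idx)
 * is simply the DMA address of byte offset idx << page_shift.
 */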

#define hr_hw_page_align(x)	ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)

static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;
}
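
/*
 * Worked example (illustrative): a DMA address of 0x1234000 becomes hw
 * page number 0x1234000 >> 12 = 0x1234, and a kernel page_shift of 14
 * (16KB pages) is reported to hardware as 14 - 12 = 2.
 */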

static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
{
	if (count > 0)
		return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;

	return 0;
}

static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift);
}

static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift) >> buf_shift;
}

static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
{
	if (!count)
		return 0;

	return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
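
/*
 * Worked example (illustrative): 100 CQEs of 64 bytes (buf_shift = 6)
 * occupy 100 << 6 = 6400 bytes, which hr_hw_page_align() rounds up to
 * 8192; to_hr_hem_entries_count() then reports 8192 >> 6 = 128 entries,
 * and to_hr_hem_entries_shift() gives ilog2(128) = 7.
 */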

#define DSCP_SHIFT 2

static inline u8 get_tclass(const struct ib_global_route *grh)
{
	return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}
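
/*
 * Worked example (illustrative): for RoCEv2 (IB_GID_TYPE_ROCE_UDP_ENCAP),
 * a traffic_class of 0xb8 (184) yields DSCP 184 >> 2 = 46 (EF); for other
 * GID types the traffic class is used as-is.
 */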

void hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

/* The hns roce hw needs both the current and the next block address from
 * the MTT, hence the minimum count of two.
 */
#define MTT_MIN_COUNT	2

static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
{
	return mtr->hem_cfg.root_ba;
}

int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int page_shift, struct ib_udata *udata,
			unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt);
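
/*
 * Typical MTR lifecycle as a hedged sketch (the buf_attr values are made
 * up for illustration; error handling elided):
 *
 *	struct hns_roce_buf_attr attr = {};
 *	u64 mtts[MTT_MIN_COUNT];
 *
 *	attr.region[0].size = wqe_size;
 *	attr.region[0].hopnum = hop_num;
 *	attr.region_count = 1;
 *	attr.page_shift = HNS_HW_PAGE_SHIFT;
 *
 *	ret = hns_roce_mtr_create(hr_dev, mtr, &attr, PAGE_SHIFT,
 *				  udata, user_addr);
 *	ret = hns_roce_mtr_find(hr_dev, mtr, 0, mtts, MTT_MIN_COUNT);
 *	...
 *	hns_roce_mtr_destroy(hr_dev, mtr);
 */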

void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);

int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}

int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
unsigned long key_to_hw_index(u32 key);

int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags);

int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct hns_roce_buf *buf,
			   unsigned int page_shift);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct ib_umem *umem,
			   unsigned int page_shift);

int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);

int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata);

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq);
int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type);

#endif /* _HNS_ROCE_DEVICE_H */