2 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <rdma/rdma_netlink.h>
35 #include <net/addrconf.h>
/* Module identity reported via modinfo; dual license matches the header above. */
39 MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
40 MODULE_DESCRIPTION("Soft RDMA transport");
41 MODULE_LICENSE("Dual BSD/GPL");
43 /* free resources for a rxe device all objects created for this device must
46 void rxe_dealloc(struct ib_device *ib_dev)
/* Recover the containing rxe_dev from the embedded ib_device handle. */
48 struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
/* Tear down every managed-object pool owned by this device. */
50 rxe_pool_cleanup(&rxe->uc_pool);
51 rxe_pool_cleanup(&rxe->pd_pool);
52 rxe_pool_cleanup(&rxe->ah_pool);
53 rxe_pool_cleanup(&rxe->srq_pool);
54 rxe_pool_cleanup(&rxe->qp_pool);
55 rxe_pool_cleanup(&rxe->cq_pool);
56 rxe_pool_cleanup(&rxe->mr_pool);
57 rxe_pool_cleanup(&rxe->mw_pool);
58 rxe_pool_cleanup(&rxe->mc_grp_pool);
59 rxe_pool_cleanup(&rxe->mc_elem_pool);
/* Release the checksum shash transform.
 * NOTE(review): no NULL guard is visible in this span -- confirm either that
 * rxe->tfm is always allocated by this point or that crypto_free_shash()
 * tolerates NULL in the target kernel version.
 */
62 crypto_free_shash(rxe->tfm);
65 /* initialize rxe device parameters */
66 static void rxe_init_device_param(struct rxe_dev *rxe)
/* All limits come from compile-time RXE_* defaults (rxe_param.h, presumably). */
68 rxe->max_inline_data = RXE_MAX_INLINE_DATA;
70 rxe->attr.vendor_id = RXE_VENDOR_ID;
71 rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
72 rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
73 rxe->attr.max_qp = RXE_MAX_QP;
74 rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
75 rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
76 rxe->attr.max_send_sge = RXE_MAX_SGE;
77 rxe->attr.max_recv_sge = RXE_MAX_SGE;
78 rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;
79 rxe->attr.max_cq = RXE_MAX_CQ;
/* max_cqe is derived from a log2 bound rather than a flat constant. */
80 rxe->attr.max_cqe = (1 << RXE_MAX_LOG_CQE) - 1;
81 rxe->attr.max_mr = RXE_MAX_MR;
82 rxe->attr.max_pd = RXE_MAX_PD;
83 rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
84 rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
85 rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
86 rxe->attr.atomic_cap = IB_ATOMIC_HCA;
87 rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
88 rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
89 rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
90 rxe->attr.max_ah = RXE_MAX_AH;
91 rxe->attr.max_srq = RXE_MAX_SRQ;
92 rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
93 rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
94 rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
95 rxe->attr.max_pkeys = RXE_MAX_PKEYS;
96 rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
/* Derive the system image GUID from the underlying netdev MAC (EUI-48).
 * NOTE(review): the second argument is outside this span -- presumably
 * rxe->ndev->dev_addr, as done for the port GUID below; confirm.
 */
97 addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
100 rxe->max_ucontext = RXE_MAX_UCONTEXT;
103 /* initialize port attributes */
104 static void rxe_init_port_param(struct rxe_port *port)
/* Port starts DOWN; link state is driven later by netdev events. */
106 port->attr.state = IB_PORT_DOWN;
/* Advertise 4096 as the ceiling but start active MTU at the 256 floor;
 * rxe_set_mtu() raises it once the real netdev MTU is known.
 */
107 port->attr.max_mtu = IB_MTU_4096;
108 port->attr.active_mtu = IB_MTU_256;
109 port->attr.gid_tbl_len = RXE_PORT_GID_TBL_LEN;
110 port->attr.port_cap_flags = RXE_PORT_PORT_CAP_FLAGS;
111 port->attr.max_msg_sz = RXE_PORT_MAX_MSG_SZ;
112 port->attr.bad_pkey_cntr = RXE_PORT_BAD_PKEY_CNTR;
113 port->attr.qkey_viol_cntr = RXE_PORT_QKEY_VIOL_CNTR;
114 port->attr.pkey_tbl_len = RXE_PORT_PKEY_TBL_LEN;
115 port->attr.lid = RXE_PORT_LID;
116 port->attr.sm_lid = RXE_PORT_SM_LID;
117 port->attr.lmc = RXE_PORT_LMC;
118 port->attr.max_vl_num = RXE_PORT_MAX_VL_NUM;
119 port->attr.sm_sl = RXE_PORT_SM_SL;
120 port->attr.subnet_timeout = RXE_PORT_SUBNET_TIMEOUT;
121 port->attr.init_type_reply = RXE_PORT_INIT_TYPE_REPLY;
122 port->attr.active_width = RXE_PORT_ACTIVE_WIDTH;
123 port->attr.active_speed = RXE_PORT_ACTIVE_SPEED;
124 port->attr.phys_state = RXE_PORT_PHYS_STATE;
/* Cache the byte value of the active MTU enum for fast datapath checks. */
125 port->mtu_cap = ib_mtu_enum_to_int(IB_MTU_256);
126 port->subnet_prefix = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);
129 /* initialize port state, note IB convention that HCA ports are always
132 static void rxe_init_ports(struct rxe_dev *rxe)
134 struct rxe_port *port = &rxe->port;
136 rxe_init_port_param(port);
/* Port GUID is derived from the underlying netdev MAC address (EUI-48). */
137 addrconf_addr_eui48((unsigned char *)&port->port_guid,
138 rxe->ndev->dev_addr);
139 spin_lock_init(&port->port_lock);
142 /* init pools of managed objects */
143 static int rxe_init_pools(struct rxe_dev *rxe)
/* Initialize one pool per object type, each capped by the corresponding
 * device attribute set in rxe_init_device_param().
 * NOTE(review): the size arguments and the error checks between calls are
 * outside this span -- each init presumably jumps to the matching unwind
 * label below on failure; confirm against the full file.
 */
147 err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
152 err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD,
157 err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH,
162 err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ,
167 err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
172 err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ,
177 err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR,
182 err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW,
187 err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
188 rxe->attr.max_mcast_grp);
192 err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
193 rxe->attr.max_total_mcast_qp_attach);
/* Error-unwind path: tear down pools in reverse order of creation.
 * The goto labels between these calls are not visible in this span.
 */
200 rxe_pool_cleanup(&rxe->mc_grp_pool);
202 rxe_pool_cleanup(&rxe->mw_pool);
204 rxe_pool_cleanup(&rxe->mr_pool);
206 rxe_pool_cleanup(&rxe->cq_pool);
208 rxe_pool_cleanup(&rxe->qp_pool);
210 rxe_pool_cleanup(&rxe->srq_pool);
212 rxe_pool_cleanup(&rxe->ah_pool);
214 rxe_pool_cleanup(&rxe->pd_pool);
216 rxe_pool_cleanup(&rxe->uc_pool);
221 /* initialize rxe device state */
222 static int rxe_init(struct rxe_dev *rxe)
226 /* init default device parameters */
227 rxe_init_device_param(rxe);
/* NOTE(review): rxe_init_ports() is presumably called between these steps
 * (ports must exist before registration); not visible in this span.
 */
231 err = rxe_init_pools(rxe);
235 /* init pending mmap list */
236 spin_lock_init(&rxe->mmap_offset_lock);
237 spin_lock_init(&rxe->pending_lock);
238 INIT_LIST_HEAD(&rxe->pending_mmaps);
/* Serializes userspace-driven device configuration changes. */
240 mutex_init(&rxe->usdev_lock);
/* Propagate the underlying netdev MTU onto the IB port, clamped to the
 * valid IB range. Called at device add time and on netdev MTU changes.
 */
245 void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
247 struct rxe_port *port = &rxe->port;
/* Map the ethernet MTU to the largest IB MTU enum that fits (0 if too small). */
250 mtu = eth_mtu_int_to_enum(ndev_mtu);
252 /* Make sure that new MTU in range */
253 mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256;
255 port->attr.active_mtu = mtu;
/* Keep the cached byte value in sync with the enum. */
256 port->mtu_cap = ib_mtu_enum_to_int(mtu);
259 /* called by ifc layer to create new rxe device.
260 * The caller should allocate memory for rxe by calling ib_alloc_device.
262 int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
/* NOTE(review): rxe_init() is presumably called before this point -- the
 * intervening lines are not visible in this span; confirm.
 */
270 rxe_set_mtu(rxe, mtu);
/* Registration makes the device visible to the RDMA core; its return
 * value is the function's result.
 */
272 return rxe_register_device(rxe, ibdev_name);
275 static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
277 struct rxe_dev *exists;
/* Refuse to stack two rxe devices on the same netdev: if a lookup
 * succeeds, drop the reference and fail.
 */
280 exists = rxe_get_dev_from_net(ndev);
282 ib_device_put(&exists->ib_dev);
283 pr_err("already configured on %s\n", ndev->name);
/* Otherwise create and register a new rxe device bound to this netdev. */
288 err = rxe_net_add(ibdev_name, ndev);
290 pr_err("failed to add %s\n", ndev->name);
/* Hooks rxe into the "rdma link add" netlink interface; the .type field
 * (not visible in this span) presumably names the link type "rxe".
 */
297 static struct rdma_link_ops rxe_link_ops = {
299 .newlink = rxe_newlink,
302 static int __init rxe_module_init(void)
306 /* initialize slab caches for managed objects */
307 err = rxe_cache_init();
309 pr_err("unable to init object pools\n");
/* Set up the network layer (UDP tunnel sockets, netdev notifiers).
 * NOTE(review): error handling between these calls is outside this span.
 */
313 err = rxe_net_init();
/* Finally expose the driver through the rdma netlink link interface. */
317 rdma_link_register(&rxe_link_ops);
322 static void __exit rxe_module_exit(void)
/* Unwind in reverse of init: stop new links first, then remove all
 * devices registered by this driver.
 */
324 rdma_link_unregister(&rxe_link_ops);
325 ib_unregister_driver(RDMA_DRIVER_RXE);
329 pr_info("unloaded\n");
/* late_initcall (rather than module_init) defers init until most other
 * subsystems -- including the netdev stack this driver depends on -- are up.
 */
332 late_initcall(rxe_module_init);
333 module_exit(rxe_module_exit);
/* Lets "rdma link add ... type rxe" autoload this module. */
335 MODULE_ALIAS_RDMA_LINK("rxe");