// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"
static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
				      struct net_device *ndev,
				      mana_handle_t default_rxobj,
				      mana_handle_t ind_table[],
				      u32 log_ind_tbl_size, u32 rx_hash_key_len,
				      u8 *rx_hash_key)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	struct mana_cfg_rx_steer_req *req = NULL;
	struct mana_cfg_rx_steer_resp resp = {};
	mana_handle_t *req_indir_tab;
	struct gdma_context *gc;
	struct gdma_dev *mdev;
	u32 req_buf_size;
	int i, err;

	mdev = dev->gdma_dev;
	gc = mdev->gdma_context;

	req_buf_size =
		sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

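	/* The request buffer is a fixed mana_cfg_rx_steer_req header followed
	 * immediately by the full-size indirection table, as reflected by
	 * indir_tab_offset and req_indir_tab below.
	 */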
	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->vport = mpc->port_handle;
	req->rx_enable = 1;
	req->update_default_rxobj = 1;
	req->default_rxobj = default_rxobj;
	req->hdr.dev_id = mdev->dev_id;

	/* If there is more than one entry in the indirection table, enable RSS */
	if (log_ind_tbl_size)
		req->rss_enable = true;

	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
	req->indir_tab_offset = sizeof(*req);
	req->update_indir_tab = true;

	req_indir_tab = (mana_handle_t *)(req + 1);
	/* The ind table passed to the hardware must have
	 * MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb
	 * ind_table to MANA_INDIRECT_TABLE_SIZE if required
	 */
	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
		req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
			  req_indir_tab[i]);
	}

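	/* Always ask the HW to update the hash key: use the key supplied by
	 * the create verb when one was given, otherwise fall back to a
	 * randomly generated RSS key.
	 */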
	req->update_hashkey = true;
	if (rx_hash_key_len)
		memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
	else
		netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

	ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
		  req->vport, default_rxobj);

	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
		    mpc->port_handle, log_ind_tbl_size);
out:
	kfree(req);
	return err;
}

static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(pd->device, struct mana_ib_dev, ib_dev);
	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
	struct mana_ib_create_qp_rss_resp resp = {};
	struct mana_ib_create_qp_rss ucmd = {};
	struct gdma_dev *gd = mdev->gdma_dev;
	mana_handle_t *mana_ind_table;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_cq *cq;
	struct mana_ib_wq *wq;
	unsigned int ind_tbl_size;
	struct ib_cq *ibcq;
	struct ib_wq *ibwq;
	int i = 0;
	u32 port;
	int ret;

	mc = gd->driver_data;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy from udata for create rss-qp, err %d\n",
			  ret);
		return ret;
	}

	if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_wr %d exceeding limit\n",
			  attr->cap.max_recv_wr);
		return -EINVAL;
	}

	if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_sge %d exceeding limit\n",
			  attr->cap.max_recv_sge);
		return -EINVAL;
	}

	ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
	if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Indirect table size %d exceeding limit\n",
			  ind_tbl_size);
		return -EINVAL;
	}

	if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
		ibdev_dbg(&mdev->ib_dev,
			  "RX Hash function is not supported, %d\n",
			  ucmd.rx_hash_function);
		return -EINVAL;
	}

	/* IB ports start with 1, MANA start with 0 */
	port = ucmd.port;
	if (port < 1 || port > mc->num_ports) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	ndev = mc->ports[port - 1];
	mpc = netdev_priv(ndev);

	ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
		  ucmd.rx_hash_function, port);

	mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
				 GFP_KERNEL);
	if (!mana_ind_table) {
		ret = -ENOMEM;
		goto fail;
	}

	qp->port = port;

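	/* For each entry of the indirection table, create a HW RQ/CQ object
	 * pair on the Ethernet port handle; the returned rx_object handles
	 * are collected in mana_ind_table for the steering config below.
	 */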
	for (i = 0; i < ind_tbl_size; i++) {
		struct mana_obj_spec wq_spec = {};
		struct mana_obj_spec cq_spec = {};

		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);

		ibcq = ibwq->cq;
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		wq_spec.gdma_region = wq->gdma_region;
		wq_spec.queue_size = wq->wq_buf_size;

		cq_spec.gdma_region = cq->gdma_region;
		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
		cq_spec.modr_ctx_id = 0;
		cq_spec.attached_eq = GDMA_CQ_NO_EQ;

		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
					 &wq_spec, &cq_spec, &wq->rx_object);
		if (ret)
			goto fail;

		/* The GDMA regions are now owned by the WQ object */
		wq->gdma_region = GDMA_INVALID_DMA_REGION;
		cq->gdma_region = GDMA_INVALID_DMA_REGION;

		wq->id = wq_spec.queue_index;
		cq->id = cq_spec.queue_index;

		ibdev_dbg(&mdev->ib_dev,
			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
			  ret, wq->rx_object, wq->id, cq->id);

		resp.entries[i].cqid = cq->id;
		resp.entries[i].wqid = wq->id;

		mana_ind_table[i] = wq->rx_object;
	}
	resp.num_entries = i;
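
	/* wq still points at the last table entry created by the loop above,
	 * so the last rx_object is also passed as the vPort's default RX
	 * object.
	 */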
	ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
					 mana_ind_table,
					 ind_tbl->log_ind_tbl_size,
					 ucmd.rx_hash_key_len,
					 ucmd.rx_hash_key);
	if (ret)
		goto fail;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata create rss-qp, %d\n",
			  ret);
		goto fail;
	}

	kfree(mana_ind_table);

	return 0;

fail:
	while (i-- > 0) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	kfree(mana_ind_table);

	return ret;
}

static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct mana_ib_create_qp_resp resp = {};
	struct gdma_dev *gd = mdev->gdma_dev;
	struct mana_ib_create_qp ucmd = {};
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct ib_umem *umem;
	int err;
	u32 port;

	mc = gd->driver_data;

	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata create qp-raw, %d\n", err);
		return err;
	}

	/* IB ports start with 1, MANA Ethernet ports start with 0 */
	port = ucmd.port;
	if (port < 1 || port > mc->num_ports)
		return -EINVAL;

	if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	ndev = mc->ports[port - 1];
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

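	/* Bind the vPort to this PD and to the doorbell page of the issuing
	 * user context; undone via mana_ib_uncfg_vport on the failure paths
	 * below.
	 */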
	err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
	if (err)
		return err;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

	umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to get umem for create qp-raw, err %d\n",
			  err);
		goto err_free_vport;
	}
	qp->sq_umem = umem;

	err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
					   &qp->sq_gdma_region);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create dma region for create qp-raw, %d\n",
			  err);
		goto err_release_umem;
	}

	ibdev_dbg(&mdev->ib_dev,
		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
		  err, qp->sq_gdma_region);

	/* Create a WQ on the same port handle used by the Ethernet */
	wq_spec.gdma_region = qp->sq_gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
	cq_spec.attached_eq = GDMA_CQ_NO_EQ;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->tx_object);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_dma_region;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->gdma_region = GDMA_INVALID_DMA_REGION;

	qp->sq_id = wq_spec.queue_index;
	send_cq->id = cq_spec.queue_index;

	ibdev_dbg(&mdev->ib_dev,
		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
		  qp->tx_object, qp->sq_id, send_cq->id);

	resp.sqid = qp->sq_id;
	resp.cqid = send_cq->id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy udata for create qp-raw, %d\n",
			  err);
		goto err_destroy_wq_obj;
	}

	return 0;

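	/* Error unwind: fall through so that each label also releases the
	 * resources acquired before the step that failed.
	 */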
err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

err_destroy_dma_region:
	mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);

err_release_umem:
	ib_umem_release(umem);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port - 1);

	return err;
}

int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		      struct ib_udata *udata)
{
	switch (attr->qp_type) {
	case IB_QPT_RAW_PACKET:
		/* When rwq_ind_tbl is used, it's for creating WQs for RSS */
		if (attr->rwq_ind_tbl)
			return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
						     udata);

		return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
	default:
		/* Creating QP other than IB_QPT_RAW_PACKET is not supported */
		ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
			  attr->qp_type);
	}

	return -EOPNOTSUPP;
}

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	/* modify_qp is not supported by this version of the driver */
	return -EOPNOTSUPP;
}

static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
				  struct ib_rwq_ind_table *ind_tbl,
				  struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct gdma_dev *gd = mdev->gdma_dev;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_wq *wq;
	struct ib_wq *ibwq;
	int i;

	mc = gd->driver_data;
	ndev = mc->ports[qp->port - 1];
	mpc = netdev_priv(ndev);

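	/* Only the HW rx_objects are destroyed here; the ib_wq and ib_cq
	 * objects themselves are expected to be freed by their own destroy
	 * verbs.
	 */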
	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
			  wq->rx_object);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	return 0;
}

static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct gdma_dev *gd = mdev->gdma_dev;
	struct ib_pd *ibpd = qp->ibqp.pd;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_pd *pd;

	mc = gd->driver_data;
	ndev = mc->ports[qp->port - 1];
	mpc = netdev_priv(ndev);
	pd = container_of(ibpd, struct mana_ib_pd, ibpd);

	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

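	/* Release the SQ buffer's DMA region and unpin the umem if they were
	 * set up at create time.
	 */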
	if (qp->sq_umem) {
		mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
		ib_umem_release(qp->sq_umem);
	}

	mana_ib_uncfg_vport(mdev, pd, qp->port - 1);

	return 0;
}

int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);

	switch (ibqp->qp_type) {
	case IB_QPT_RAW_PACKET:
		if (ibqp->rwq_ind_tbl)
			return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
						      udata);

		return mana_ib_destroy_qp_raw(qp, udata);
	default:
		ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
			  ibqp->qp_type);
	}

	return -EOPNOTSUPP;
}