/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1) (DEPRECATED)");
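
/*
 * Adapter memory (TPT and PBL) can be written either with inline
 * immediate data carried in the work request, or by DMA through an
 * ULPTX DSGL.  Writes larger than inline_threshold bytes use the DMA
 * path when the device supports it (see write_adapter_mem() below).
 */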
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128

static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
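
/*
 * T4 and T5 hardware cannot handle memory regions of 8GB or larger;
 * callers use this check to reject such registrations up front.
 */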
static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{
	return (is_t4(dev->rdev.lldi.adapter_type) ||
		is_t5(dev->rdev.lldi.adapter_type)) &&
		length >= 8*1024*1024*1024ULL;
}
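
/*
 * Write one 32B-aligned, 32B-multiple chunk of adapter memory with a
 * single ULP_TX_MEM_WRITE work request whose payload is described by a
 * one-entry DSGL pointing at the (already DMA-mapped) buffer.  When
 * wr_waitp is non-NULL, a firmware completion is requested and awaited.
 */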
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
				       u32 len, dma_addr_t data,
				       struct sk_buff *skb,
				       struct c4iw_wr_wait *wr_waitp)
{
	struct ulp_mem_io *req;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;

	addr &= 0x7FFFFFF;

	if (wr_waitp)
		c4iw_init_wr_wait(wr_waitp);
	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

	if (!skb) {
		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
		if (!skb)
			return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	req = __skb_put_zero(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
			(wr_waitp ? FW_WR_COMPL_F : 0));
	req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			       T5_ULP_MEMIO_ORDER_V(1) |
			       T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));

	sgl = (struct ulptx_sgl *)(req + 1);
	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64(data);

	if (wr_waitp)
		ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	else
		ret = c4iw_ofld_send(rdev, skb);
	return ret;
}
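
/*
 * Write len bytes at addr using inline immediate data, splitting the
 * buffer into C4IW_MAX_INLINE_SIZE (96B) chunks, one work request per
 * chunk, each padded to a multiple of T4_ULPTX_MIN_IO (32B).  Only the
 * final work request asks for a completion, which is then waited on.
 */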
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
				  void *data, struct sk_buff *skb,
				  struct c4iw_wr_wait *wr_waitp)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));

	if (is_t4(rdev->lldi.adapter_type))
		cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
	else
		cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);

	addr &= 0x7FFFFFF;
	pr_debug("addr 0x%x len %u\n", addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(wr_waitp);
	for (i = 0; i < num_wqe; i++) {

		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof *req + sizeof *sc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		if (!skb) {
			skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
			if (!skb)
				return -ENOMEM;
		}
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = __skb_put_zero(skb, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
						    FW_WR_COMPL_F);
			req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				       FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cmd;
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		if (i == (num_wqe-1))
			ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
						 __func__);
		else
			ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			break;
		skb = NULL;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	return ret;
}
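
/*
 * DMA-map the caller's buffer and write it in T4_ULPTX_MAX_DMA (1KB)
 * chunks; any remainder below inline_threshold falls back to the
 * inline path.  Only the last write issued requests the completion.
 */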
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
			       void *data, struct sk_buff *skb,
			       struct c4iw_wr_wait *wr_waitp)
{
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
						  skb, remain ? NULL : wr_waitp);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data += dmalen;
		daddr += dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
					     wr_waitp);
out:
	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
	return ret;
}

/*
 * write len bytes of data into addr (32B aligned address)
 * If data is NULL, clear len byte of memory to zero.
 */
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data, struct sk_buff *skb,
			     struct c4iw_wr_wait *wr_waitp)
{
	int ret;

	if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
		goto out;
	}

	if (len <= inline_threshold) {
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
		goto out;
	}

	ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
	if (ret) {
		pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
				    pci_name(rdev->lldi.pdev));
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
	}
out:
	return ret;
}
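
/*
 * A stag is an 8-bit key in the low byte plus a 24-bit TPT index in
 * the upper bits; the index addresses one 32-byte fw_ri_tpte entry in
 * adapter memory.
 */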
/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
			   struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
		 stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
			FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
			FW_RI_TPTE_STAGSTATE_V(stag_state) |
			FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
		tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
			(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
			FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO))|
			FW_RI_TPTE_PS_V(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(tpt), &tpt, skb, wr_waitp);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
{
	int err;

	pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
		 pbl_addr, rdev->lldi.vr->pbl.start,
		 pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
				wr_waitp);
	return err;
}
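
/*
 * Thin wrappers around write_tpt_entry(): dereg_mem() and
 * deallocate_window() clear an existing TPT entry (reset_tpt_entry=1),
 * while allocate_window() and allocate_stag() create a fresh one.
 */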
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr, struct sk_buff *skb,
		     struct c4iw_wr_wait *wr_waitp)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr, skb, wr_waitp);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			   struct c4iw_wr_wait *wr_waitp)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0, NULL, wr_waitp);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
			     struct sk_buff *skb,
			     struct c4iw_wr_wait *wr_waitp)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0, skb, wr_waitp);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr,
			 struct c4iw_wr_wait *wr_waitp)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
}
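
/*
 * Mark the MR valid, derive lkey/rkey from the stag, and publish the
 * MR in the mmid->mhp xarray so CQ and invalidate paths can find it.
 */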
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	mhp->ibmr.length = mhp->attr.len;
	mhp->ibmr.iova = mhp->attr.va_fbo;
	mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
	pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
	return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ?
			      mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ?
			      mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
			      mhp->wr_waitp);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret) {
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
		mhp->dereg_skb = NULL;
	}
	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}
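
/*
 * Register a DMA MR covering all of memory: zero-based VA, length
 * ~0ULL and no PBL, so no page list is written to adapter memory.
 */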
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	pr_debug("ib_pd %p\n", pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_mhp;
	}
	c4iw_init_wr_wait(mhp->wr_waitp);

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
			      NULL, mhp->wr_waitp);
	if (ret)
		goto err_free_skb;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err_dereg_mem;
	return &mhp->ibmr;
err_dereg_mem:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_skb:
	kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
	return ERR_PTR(ret);
}
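
/*
 * Register a user MR: pin the pages via ib_umem_get(), then stream
 * their DMA addresses into the PBL one page of entries at a time
 * before writing the TPT entry itself through register_mem().
 */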
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, i;
	int err = -ENOMEM;
	struct sg_dma_page_iter sg_iter;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	pr_debug("ib_pd %p\n", pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp)
		goto err_free_mhp;

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb)
		goto err_free_wr_wait;

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(udata, start, length, acc, 0);
	if (IS_ERR(mhp->umem))
		goto err_free_skb;

	shift = PAGE_SHIFT;

	n = ib_umem_num_pages(mhp->umem);
	err = alloc_pbl(mhp, n);
	if (err)
		goto err_umem_release;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl_free;
	}

	i = n = 0;

	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
		if (i == PAGE_SIZE / sizeof(*pages)) {
			err = write_pbl(&mhp->rhp->rdev, pages,
					mhp->attr.pbl_addr + (n << 3), i,
					mhp->wr_waitp);
			if (err)
				goto pbl_done;
			n += i;
			i = 0;
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i,
				mhp->wr_waitp);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl_free;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl_free;

	return &mhp->ibmr;

err_pbl_free:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err_umem_release:
	ib_umem_release(mhp->umem);
err_free_skb:
	kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
	return ERR_PTR(err);
}
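
/* Allocate a type-1 memory window bound to the given PD. */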
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			    struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto free_mhp;
	}

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto free_wr_wait;
	}

	ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
	if (ret)
		goto free_skb;
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto dealloc_win;
	}
	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
	return &(mhp->ibmw);

dealloc_win:
	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
			  mhp->wr_waitp);
free_skb:
	kfree_skb(mhp->dereg_skb);
free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
free_mhp:
	kfree(mhp);
	return ERR_PTR(ret);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	xa_erase_irq(&rhp->mrs, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
			  mhp->wr_waitp);
	kfree_skb(mhp->dereg_skb);
	c4iw_put_wr_wait(mhp->wr_waitp);
	pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
	kfree(mhp);
	return 0;
}
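
/*
 * Allocate a fast-register MR: a host-coherent page list (mpl) that
 * c4iw_map_mr_sg() fills in, plus a PBL and stag in adapter memory.
 */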
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			    u32 max_num_sg, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
					 use_dsgl))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_mhp;
	}
	c4iw_init_wr_wait(mhp->wr_waitp);

	mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
				      length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err_free_dma;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr,
			    mhp->wr_waitp);
	if (ret)
		goto err_free_pbl;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 0;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_dereg;
	}

	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
	return &(mhp->ibmr);
err_dereg:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err_free_dma:
	dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}
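
/* ib_sg_to_pages() callback: append one page address to the mpl. */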
static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		   unsigned int *sg_offset)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}

int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	pr_debug("ib_mr %p\n", ib_mr);

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	xa_erase_irq(&rhp->mrs, mmid);
	if (mhp->mpl)
		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	ib_umem_release(mhp->umem);
	pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
	c4iw_put_wr_wait(mhp->wr_waitp);
	kfree(mhp);
	return 0;
}
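
/*
 * Called when an rkey is invalidated: mark the MR state invalid under
 * the xarray lock so it cannot be used until re-registered.
 */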
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	xa_lock_irqsave(&rhp->mrs, flags);
	mhp = xa_load(&rhp->mrs, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	xa_unlock_irqrestore(&rhp->mrs, flags);
}