1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* Copyright (c) 2020 Marvell International Ltd. */
4 #include <linux/dma-mapping.h>
5 #include <linux/qed/qed_chain.h>
6 #include <linux/vmalloc.h>
8 #include "qed_dev_api.h"
/* qed_chain_free_next_ptr() - Free all pages of a NEXT_PTR mode chain.
 *
 * @cdev:  Main device structure (supplies the DMA device).
 * @chain: Chain whose pages are to be released.
 *
 * Each page ends with a struct qed_chain_next element holding the
 * virt/phys addresses of the following page; those links are captured
 * before the current page is freed, so the walk survives the free.
 *
 * NOTE(review): several lines (declarations of size/i, the assignment
 * of `next`, loop advance, closing braces) are elided in this view —
 * comments describe only what is visible.
 */
10 static void qed_chain_free_next_ptr(struct qed_dev *cdev,
11 struct qed_chain *chain)
13 struct device *dev = &cdev->pdev->dev;
14 struct qed_chain_next *next;
15 dma_addr_t phys, phys_next;
16 void *virt, *virt_next;
/* The next-ptr element sits immediately after the usable elements of
 * each page, hence this byte offset. */
19 size = chain->elem_size * chain->usable_per_page;
20 virt = chain->p_virt_addr;
21 phys = chain->p_phys_addr;
/* Walk the page list, freeing one DMA-coherent page per iteration. */
23 for (i = 0; i < chain->page_cnt; i++) {
/* Save the link to the next page before this page is freed. */
28 virt_next = next->next_virt;
29 phys_next = HILO_DMA_REGPAIR(next->next_phys);
31 dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys);
/* qed_chain_free_single() - Free the one page of a SINGLE mode chain.
 *
 * @cdev:  Main device structure (supplies the DMA device).
 * @chain: Chain whose single backing page is to be released.
 *
 * A NULL p_virt_addr means the page was never allocated (or was
 * already released), so there is nothing to free.
 */
38 static void qed_chain_free_single(struct qed_dev *cdev,
39 struct qed_chain *chain)
41 if (!chain->p_virt_addr)
44 dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
45 chain->p_virt_addr, chain->p_phys_addr);
/* qed_chain_free_pbl() - Free all pages and the PBL of a PBL mode chain.
 *
 * @cdev:  Main device structure (supplies the DMA device).
 * @chain: Chain whose pages and page-base-list are to be released.
 *
 * NOTE(review): declarations of pbl_size/i and some braces/continue
 * statements are elided in this view.
 */
48 static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
50 struct device *dev = &cdev->pdev->dev;
51 struct addr_tbl_entry *entry;
/* Nothing to do if the address table was never allocated. */
54 if (!chain->pbl.pp_addr_tbl)
/* Free every chain page recorded in the address table; entries with a
 * NULL virt_addr were never populated and are skipped. */
57 for (i = 0; i < chain->page_cnt; i++) {
58 entry = chain->pbl.pp_addr_tbl + i;
59 if (!entry->virt_addr)
62 dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr,
/* The PBL table itself is freed only when it was allocated by this
 * driver; an externally supplied PBL (b_external_pbl) remains owned
 * by the caller. */
66 pbl_size = chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
68 if (!chain->b_external_pbl)
69 dma_free_coherent(dev, pbl_size, chain->pbl_sp.p_virt_table,
70 chain->pbl_sp.p_phys_table);
/* The virtual address table was vzalloc()'d; release it and clear the
 * pointer to make a repeated free harmless. */
72 vfree(chain->pbl.pp_addr_tbl);
73 chain->pbl.pp_addr_tbl = NULL;
/**
77 * qed_chain_free() - Free chain DMA memory.
 *
79 * @cdev: Main device structure.
80 * @chain: Chain to free.
 *
 * Dispatches to the mode-specific free routine, then resets the
 * chain's memory bookkeeping to an empty state so the structure can
 * be safely reused or freed again.
 *
 * NOTE(review): break statements and the switch's default/closing
 * braces are elided in this view.
 */
82 void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
84 switch (chain->mode) {
85 case QED_CHAIN_MODE_NEXT_PTR:
86 qed_chain_free_next_ptr(cdev, chain);
88 case QED_CHAIN_MODE_SINGLE:
89 qed_chain_free_single(cdev, chain);
91 case QED_CHAIN_MODE_PBL:
92 qed_chain_free_pbl(cdev, chain);
/* Clear virt/phys bookkeeping regardless of mode. */
98 qed_chain_init_mem(chain, NULL, 0);
/* qed_chain_alloc_sanity_check() - Verify the requested chain geometry
 * fits within the limits of its counter type.
 *
 * @cdev:      Main device structure (used for error reporting).
 * @cnt_type:  Width of the chain's producer/consumer counters.
 * @elem_size: Size of a single chain element, in bytes.
 * @page_cnt:  Number of pages the chain would occupy.
 *
 * NOTE(review): the return-type line, break statements, and the error
 * return path are elided in this view — presumably returns 0 on
 * success and an errno on overflow; confirm against the full source.
 */
102 qed_chain_alloc_sanity_check(struct qed_dev *cdev,
103 enum qed_chain_cnt_type cnt_type,
104 size_t elem_size, u32 page_cnt)
106 u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
108 /* The actual chain size can be larger than the maximal possible value
109 * after rounding up the requested elements number to pages, and after
110 * taking into account the unusable elements (next-ptr elements).
111 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
112 * size/capacity fields are of u32 type.
 */
115 case QED_CHAIN_CNT_TYPE_U16:
116 if (chain_size > U16_MAX + 1)
120 case QED_CHAIN_CNT_TYPE_U32:
121 if (chain_size > U32_MAX)
/* Format string for the overflow error report (chain_size arg). */
130 "The actual chain size (0x%llx) is larger than the maximal possible value\n",
/* qed_chain_alloc_next_ptr() - Allocate and link all pages of a
 * NEXT_PTR mode chain.
 *
 * @cdev:  Main device structure (supplies the DMA device).
 * @chain: Chain to populate.
 *
 * Allocates page_cnt DMA-coherent pages; the first page initializes
 * the chain, and each subsequent page is linked from the previous
 * page's trailing next-ptr element.
 *
 * NOTE(review): declarations of phys/i, the allocation-failure check,
 * the else branch, and the return statement are elided in this view.
 */
136 static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
137 struct qed_chain *chain)
139 struct device *dev = &cdev->pdev->dev;
140 void *virt, *virt_prev = NULL;
144 for (i = 0; i < chain->page_cnt; i++) {
145 virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
/* First page: record it as the chain's base and reset indices. */
151 qed_chain_init_mem(chain, virt, phys);
152 qed_chain_reset(chain);
/* Subsequent pages: link from the previous page's next element. */
154 qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
161 /* Last page's next element should point to the beginning of the
 * chain, closing it into a ring. */
164 qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
/* qed_chain_alloc_single() - Allocate the single page backing a
 * SINGLE mode chain.
 *
 * @cdev:  Main device structure (supplies the DMA device).
 * @chain: Chain to populate.
 *
 * NOTE(review): the declarations of virt/phys, the allocation-failure
 * check, and the return statement are elided in this view.
 */
170 static int qed_chain_alloc_single(struct qed_dev *cdev,
171 struct qed_chain *chain)
176 virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
/* Record the page as the chain's base and reset indices. */
181 qed_chain_init_mem(chain, virt, phys);
182 qed_chain_reset(chain);
/* qed_chain_alloc_pbl() - Allocate all pages, the page-base-list
 * (PBL), and the virtual address table of a PBL mode chain.
 *
 * @cdev:    Main device structure (supplies the DMA device).
 * @chain:   Chain to populate.
 * @ext_pbl: Optional externally provided PBL; when used, the caller
 *           retains ownership (b_external_pbl is set so free skips it).
 *
 * NOTE(review): declarations (virt, pbl_virt, size, i, page_cnt type),
 * failure checks, the else branch of the ext_pbl test, and the return
 * statement are elided in this view.
 */
187 static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
188 struct qed_chain_ext_pbl *ext_pbl)
190 struct device *dev = &cdev->pdev->dev;
191 struct addr_tbl_entry *addr_tbl;
192 dma_addr_t phys, pbl_phys;
198 page_cnt = chain->page_cnt;
/* Virtual address table: one entry per page, kept so the pages can be
 * freed later. array_size() guards the multiplication overflow. */
200 size = array_size(page_cnt, sizeof(*addr_tbl));
201 if (unlikely(size == SIZE_MAX))
204 addr_tbl = vzalloc(size);
208 chain->pbl.pp_addr_tbl = addr_tbl;
/* Externally supplied PBL: use it as-is and remember not to free it. */
212 pbl_virt = ext_pbl->p_pbl_virt;
213 pbl_phys = ext_pbl->p_pbl_phys;
215 chain->b_external_pbl = true;
/* Otherwise allocate the PBL ourselves: page_cnt physical-address
 * entries, again overflow-checked. */
217 size = array_size(page_cnt, QED_CHAIN_PBL_ENTRY_SIZE);
218 if (unlikely(size == SIZE_MAX))
221 pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys,
228 chain->pbl_sp.p_virt_table = pbl_virt;
229 chain->pbl_sp.p_phys_table = pbl_phys;
/* Allocate each chain page and record it in both tables. */
231 for (i = 0; i < page_cnt; i++) {
232 virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
/* First page initializes the chain and resets indices. */
238 qed_chain_init_mem(chain, virt, phys);
239 qed_chain_reset(chain);
242 /* Fill the PBL table with the physical address of the page */
243 *(dma_addr_t *)pbl_virt = phys;
244 pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
246 /* Keep the virtual address of the page */
247 addr_tbl[i].virt_addr = virt;
248 addr_tbl[i].dma_map = phys;
/* qed_chain_alloc() - Allocate and initialize DMA memory for a chain.
 *
 * @cdev:         Main device structure.
 * @intended_use: Producer/consumer usage mode of the chain.
 * @mode:         Chain layout mode (NEXT_PTR, SINGLE, or PBL).
 * @cnt_type:     Width of the chain's counters (u16 or u32).
 * @chain:        Chain structure to initialize.
 * @ext_pbl:      Optional externally provided PBL (PBL mode only).
 *
 * Computes the page count, validates it against the counter type,
 * initializes the chain parameters, and dispatches to the
 * mode-specific page allocator. On allocation failure everything
 * already allocated is torn down via qed_chain_free().
 *
 * NOTE(review): declarations (page_cnt, rc), num_elems/elem_size
 * parameters, break statements, and the final return are elided in
 * this view — the function continues past the visible end.
 */
254 int qed_chain_alloc(struct qed_dev *cdev,
255 enum qed_chain_use_mode intended_use,
256 enum qed_chain_mode mode,
257 enum qed_chain_cnt_type cnt_type,
260 struct qed_chain *chain,
261 struct qed_chain_ext_pbl *ext_pbl)
/* SINGLE mode uses exactly one page (the assignment is elided here);
 * other modes derive the page count from element count and size. */
266 if (mode == QED_CHAIN_MODE_SINGLE)
269 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
/* Reject geometries that overflow the chain's counter type. */
271 rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
274 "Cannot allocate a chain with the given arguments:\n");
276 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
277 intended_use, mode, cnt_type, num_elems, elem_size);
281 qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode,
/* Dispatch to the mode-specific allocator. */
285 case QED_CHAIN_MODE_NEXT_PTR:
286 rc = qed_chain_alloc_next_ptr(cdev, chain);
288 case QED_CHAIN_MODE_SINGLE:
289 rc = qed_chain_alloc_single(cdev, chain);
291 case QED_CHAIN_MODE_PBL:
292 rc = qed_chain_alloc_pbl(cdev, chain, ext_pbl);
/* Error path: release whatever was allocated before the failure. */
301 qed_chain_free(cdev, chain);