917b783433f7dad4ca812e0146b927064cae8aa8
[linux-2.6-microblaze.git] / drivers / net / ethernet / qlogic / qed / qed_chain.c
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* Copyright (c) 2020 Marvell International Ltd. */
3
4 #include <linux/dma-mapping.h>
5 #include <linux/qed/qed_chain.h>
6 #include <linux/vmalloc.h>
7
8 #include "qed_dev_api.h"
9
10 static void qed_chain_free_next_ptr(struct qed_dev *cdev,
11                                     struct qed_chain *chain)
12 {
13         struct device *dev = &cdev->pdev->dev;
14         struct qed_chain_next *next;
15         dma_addr_t phys, phys_next;
16         void *virt, *virt_next;
17         u32 size, i;
18
19         size = chain->elem_size * chain->usable_per_page;
20         virt = chain->p_virt_addr;
21         phys = chain->p_phys_addr;
22
23         for (i = 0; i < chain->page_cnt; i++) {
24                 if (!virt)
25                         break;
26
27                 next = virt + size;
28                 virt_next = next->next_virt;
29                 phys_next = HILO_DMA_REGPAIR(next->next_phys);
30
31                 dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys);
32
33                 virt = virt_next;
34                 phys = phys_next;
35         }
36 }
37
38 static void qed_chain_free_single(struct qed_dev *cdev,
39                                   struct qed_chain *chain)
40 {
41         if (!chain->p_virt_addr)
42                 return;
43
44         dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
45                           chain->p_virt_addr, chain->p_phys_addr);
46 }
47
48 static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
49 {
50         struct device *dev = &cdev->pdev->dev;
51         struct addr_tbl_entry *entry;
52         u32 pbl_size, i;
53
54         if (!chain->pbl.pp_addr_tbl)
55                 return;
56
57         for (i = 0; i < chain->page_cnt; i++) {
58                 entry = chain->pbl.pp_addr_tbl + i;
59                 if (!entry->virt_addr)
60                         break;
61
62                 dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr,
63                                   entry->dma_map);
64         }
65
66         pbl_size = chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
67
68         if (!chain->b_external_pbl)
69                 dma_free_coherent(dev, pbl_size, chain->pbl_sp.p_virt_table,
70                                   chain->pbl_sp.p_phys_table);
71
72         vfree(chain->pbl.pp_addr_tbl);
73         chain->pbl.pp_addr_tbl = NULL;
74 }
75
/**
 * qed_chain_free() - Free chain DMA memory.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 *
 * Dispatches to the mode-specific free routine, then clears the chain's
 * cached page addresses so a repeated free is harmless. A chain with an
 * unrecognized mode is left untouched.
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
{
	switch (chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, chain);
		break;
	default:
		/* Unknown mode - nothing was allocated through this API */
		return;
	}

	/* Reset the cached virt/phys addresses to NULL/0 */
	qed_chain_init_mem(chain, NULL, 0);
}
100
101 static int
102 qed_chain_alloc_sanity_check(struct qed_dev *cdev,
103                              enum qed_chain_cnt_type cnt_type,
104                              size_t elem_size, u32 page_cnt)
105 {
106         u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
107
108         /* The actual chain size can be larger than the maximal possible value
109          * after rounding up the requested elements number to pages, and after
110          * taking into account the unusuable elements (next-ptr elements).
111          * The size of a "u16" chain can be (U16_MAX + 1) since the chain
112          * size/capacity fields are of u32 type.
113          */
114         switch (cnt_type) {
115         case QED_CHAIN_CNT_TYPE_U16:
116                 if (chain_size > U16_MAX + 1)
117                         break;
118
119                 return 0;
120         case QED_CHAIN_CNT_TYPE_U32:
121                 if (chain_size > U32_MAX)
122                         break;
123
124                 return 0;
125         default:
126                 return -EINVAL;
127         }
128
129         DP_NOTICE(cdev,
130                   "The actual chain size (0x%llx) is larger than the maximal possible value\n",
131                   chain_size);
132
133         return -EINVAL;
134 }
135
136 static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
137                                     struct qed_chain *chain)
138 {
139         struct device *dev = &cdev->pdev->dev;
140         void *virt, *virt_prev = NULL;
141         dma_addr_t phys;
142         u32 i;
143
144         for (i = 0; i < chain->page_cnt; i++) {
145                 virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
146                                           GFP_KERNEL);
147                 if (!virt)
148                         return -ENOMEM;
149
150                 if (i == 0) {
151                         qed_chain_init_mem(chain, virt, phys);
152                         qed_chain_reset(chain);
153                 } else {
154                         qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
155                                                      phys);
156                 }
157
158                 virt_prev = virt;
159         }
160
161         /* Last page's next element should point to the beginning of the
162          * chain.
163          */
164         qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
165                                      chain->p_phys_addr);
166
167         return 0;
168 }
169
170 static int qed_chain_alloc_single(struct qed_dev *cdev,
171                                   struct qed_chain *chain)
172 {
173         dma_addr_t phys;
174         void *virt;
175
176         virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
177                                   &phys, GFP_KERNEL);
178         if (!virt)
179                 return -ENOMEM;
180
181         qed_chain_init_mem(chain, virt, phys);
182         qed_chain_reset(chain);
183
184         return 0;
185 }
186
/* Allocate a PBL chain: a virtual address table, the PBL table holding the
 * DMA addresses of the data pages (unless one is supplied via @ext_pbl),
 * and the data pages themselves.
 *
 * On any failure the function returns immediately with partially
 * allocated resources still recorded in @chain; the caller
 * (qed_chain_alloc()) releases them via qed_chain_free(), which relies on
 * chain->pbl.pp_addr_tbl and pbl_sp being set before each allocation
 * step - do not reorder these assignments.
 */
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
			       struct qed_chain_ext_pbl *ext_pbl)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *addr_tbl;
	dma_addr_t phys, pbl_phys;
	void *pbl_virt;
	u32 page_cnt, i;
	size_t size;
	void *virt;

	page_cnt = chain->page_cnt;

	/* array_size() saturates to SIZE_MAX on multiplication overflow */
	size = array_size(page_cnt, sizeof(*addr_tbl));
	if (unlikely(size == SIZE_MAX))
		return -EOVERFLOW;

	addr_tbl = vzalloc(size);
	if (!addr_tbl)
		return -ENOMEM;

	/* Recorded before page allocation so the error path can free it */
	chain->pbl.pp_addr_tbl = addr_tbl;

	if (ext_pbl) {
		/* Caller owns the PBL table; mark it so we never free it */
		size = 0;
		pbl_virt = ext_pbl->p_pbl_virt;
		pbl_phys = ext_pbl->p_pbl_phys;

		chain->b_external_pbl = true;
	} else {
		size = array_size(page_cnt, QED_CHAIN_PBL_ENTRY_SIZE);
		if (unlikely(size == SIZE_MAX))
			return -EOVERFLOW;

		pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys,
					      GFP_KERNEL);
	}

	if (!pbl_virt)
		return -ENOMEM;

	chain->pbl_sp.p_virt_table = pbl_virt;
	chain->pbl_sp.p_phys_table = pbl_phys;

	for (i = 0; i < page_cnt; i++) {
		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		}

		/* Fill the PBL table with the physical address of the page */
		*(dma_addr_t *)pbl_virt = phys;
		pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;

		/* Keep the virtual address of the page */
		addr_tbl[i].virt_addr = virt;
		addr_tbl[i].dma_map = phys;
	}

	return 0;
}
253
254 int qed_chain_alloc(struct qed_dev *cdev,
255                     enum qed_chain_use_mode intended_use,
256                     enum qed_chain_mode mode,
257                     enum qed_chain_cnt_type cnt_type,
258                     u32 num_elems,
259                     size_t elem_size,
260                     struct qed_chain *chain,
261                     struct qed_chain_ext_pbl *ext_pbl)
262 {
263         u32 page_cnt;
264         int rc;
265
266         if (mode == QED_CHAIN_MODE_SINGLE)
267                 page_cnt = 1;
268         else
269                 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
270
271         rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
272         if (rc) {
273                 DP_NOTICE(cdev,
274                           "Cannot allocate a chain with the given arguments:\n");
275                 DP_NOTICE(cdev,
276                           "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
277                           intended_use, mode, cnt_type, num_elems, elem_size);
278                 return rc;
279         }
280
281         qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode,
282                               cnt_type);
283
284         switch (mode) {
285         case QED_CHAIN_MODE_NEXT_PTR:
286                 rc = qed_chain_alloc_next_ptr(cdev, chain);
287                 break;
288         case QED_CHAIN_MODE_SINGLE:
289                 rc = qed_chain_alloc_single(cdev, chain);
290                 break;
291         case QED_CHAIN_MODE_PBL:
292                 rc = qed_chain_alloc_pbl(cdev, chain, ext_pbl);
293                 break;
294         default:
295                 return -EINVAL;
296         }
297
298         if (!rc)
299                 return 0;
300
301         qed_chain_free(cdev, chain);
302
303         return rc;
304 }