iommu/amd: Move IO page table related functions
drivers/iommu/amd/io_pgtable.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table allocator.
 *
 * Copyright (C) 2020 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"

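/*
 * The io-pgtable flush hooks below are intentionally empty: IOTLB
 * invalidation for v1 page tables is still driven by the AMD driver
 * itself through its command queue, not by the io-pgtable core.
 */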
static void v1_tlb_flush_all(void *cookie)
{
}

static void v1_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
}

static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule,
			    void *cookie)
{
}

static const struct iommu_flush_ops v1_flush_ops = {
	.tlb_flush_all	= v1_tlb_flush_all,
	.tlb_flush_walk	= v1_tlb_flush_walk,
	.tlb_add_page	= v1_tlb_add_page,
};

/*
 * Helper function to get the first pte of a large mapping
 */
static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
			 unsigned long *count)
{
	unsigned long pte_mask, pg_size, cnt;
	u64 *fpte;

	pg_size  = PTE_PAGE_SIZE(*pte);
	cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
	pte_mask = ~((cnt << 3) - 1);
	fpte     = (u64 *)(((unsigned long)pte) & pte_mask);

	if (page_size)
		*page_size = pg_size;

	if (count)
		*count = cnt;

	return fpte;
}
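
/*
 * Worked example (illustrative): for a 32 KiB large mapping, eight
 * 4 KiB PTEs are replicated, so cnt == 8 and cnt << 3 == 64 bytes of
 * table space. pte_mask == ~63UL then aligns the incoming pointer
 * down to a 64-byte boundary, i.e. to the first of the eight PTEs.
 */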

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

static void free_page_list(struct page *freelist)
{
	while (freelist != NULL) {
		unsigned long p = (unsigned long)page_address(freelist);

		freelist = freelist->freelist;
		free_page(p);
	}
}

static struct page *free_pt_page(unsigned long pt, struct page *freelist)
{
	struct page *p = virt_to_page((void *)pt);

	p->freelist = freelist;

	return p;
}

#define DEFINE_FREE_PT_FN(LVL, FN)						\
static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist)	\
{										\
	unsigned long p;							\
	u64 *pt;								\
	int i;									\
										\
	pt = (u64 *)__pt;							\
										\
	for (i = 0; i < 512; ++i) {						\
		/* PTE present? */						\
		if (!IOMMU_PTE_PRESENT(pt[i]))					\
			continue;						\
										\
		/* Large PTE? */						\
		if (PM_PTE_LEVEL(pt[i]) == 0 ||					\
		    PM_PTE_LEVEL(pt[i]) == 7)					\
			continue;						\
										\
		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);			\
		freelist = FN(p, freelist);					\
	}									\
										\
	return free_pt_page((unsigned long)pt, freelist);			\
}

DEFINE_FREE_PT_FN(l2, free_pt_page)
DEFINE_FREE_PT_FN(l3, free_pt_l2)
DEFINE_FREE_PT_FN(l4, free_pt_l3)
DEFINE_FREE_PT_FN(l5, free_pt_l4)
DEFINE_FREE_PT_FN(l6, free_pt_l5)
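
/*
 * For reference, DEFINE_FREE_PT_FN(l2, free_pt_page) expands to a
 * free_pt_l2() that scans the 512 entries of a level-2 table, skips
 * non-present and large (PTE level 0 or 7) entries, queues each
 * referenced level-1 table via free_pt_page(), and finally queues the
 * level-2 page itself. Each higher-level variant chains to the one
 * below it.
 */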

static struct page *free_sub_pt(unsigned long root, int mode,
				struct page *freelist)
{
	switch (mode) {
	case PAGE_MODE_NONE:
	case PAGE_MODE_7_LEVEL:
		break;
	case PAGE_MODE_1_LEVEL:
		freelist = free_pt_page(root, freelist);
		break;
	case PAGE_MODE_2_LEVEL:
		freelist = free_pt_l2(root, freelist);
		break;
	case PAGE_MODE_3_LEVEL:
		freelist = free_pt_l3(root, freelist);
		break;
	case PAGE_MODE_4_LEVEL:
		freelist = free_pt_l4(root, freelist);
		break;
	case PAGE_MODE_5_LEVEL:
		freelist = free_pt_l5(root, freelist);
		break;
	case PAGE_MODE_6_LEVEL:
		freelist = free_pt_l6(root, freelist);
		break;
	default:
		BUG();
	}

	return freelist;
}

void free_pagetable(struct domain_pgtable *pgtable)
{
	struct page *freelist = NULL;
	unsigned long root;

	if (pgtable->mode == PAGE_MODE_NONE)
		return;

	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
	       pgtable->mode > PAGE_MODE_6_LEVEL);

	root = (unsigned long)pgtable->root;
	freelist = free_sub_pt(root, pgtable->mode, freelist);

	free_page_list(freelist);
}

void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
				  u64 *root, int mode)
{
	u64 pt_root;

	/* lowest 3 bits encode pgtable mode */
	pt_root = mode & 7;
	pt_root |= (u64)root;

	amd_iommu_domain_set_pt_root(domain, pt_root);
}
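
/*
 * Example encoding (illustrative): the root table is page-aligned, so
 * its low 12 bits are zero; for a 3-level table at 0x1234000 this
 * yields pt_root == 0x1234000 | PAGE_MODE_3_LEVEL == 0x1234003.
 */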

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   unsigned long address,
				   gfp_t gfp)
{
	struct domain_pgtable pgtable;
	unsigned long flags;
	bool ret = true;
	u64 *pte;

	spin_lock_irqsave(&domain->lock, flags);

	amd_iommu_domain_get_pgtable(domain, &pgtable);

	if (address <= PM_LEVEL_SIZE(pgtable.mode))
		goto out;

	ret = false;
	if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
		goto out;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		goto out;

	*pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));

	pgtable.root  = pte;
	pgtable.mode += 1;
	amd_iommu_update_and_flush_device_table(domain);
	amd_iommu_domain_flush_complete(domain);

	/*
	 * Device Table needs to be updated and flushed before the new root can
	 * be published.
	 */
	amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);

	ret = true;

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
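
/*
 * Illustration: a PAGE_MODE_3_LEVEL table covers IOVAs up to 2^39 - 1;
 * growing it to PAGE_MODE_4_LEVEL extends the limit to 2^48 - 1, each
 * additional level adding 9 bits of address space.
 */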

static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp,
		      bool *updated)
{
	struct domain_pgtable pgtable;
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	amd_iommu_domain_get_pgtable(domain, &pgtable);

	while (address > PM_LEVEL_SIZE(pgtable.mode)) {
		/*
		 * Return an error if there is no memory to update the
		 * page-table.
		 */
		if (!increase_address_space(domain, address, gfp))
			return NULL;

		/* Read new values to check if update was successful */
		amd_iommu_domain_get_pgtable(domain, &pgtable);
	}

	level   = pgtable.mode - 1;
	pte     = &pgtable.root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		u64 __pte, __npte;
		int pte_level;

		__pte     = *pte;
		pte_level = PM_PTE_LEVEL(__pte);

		/*
		 * If we replace a series of large PTEs, we need
		 * to tear down all of them.
		 */
		if (IOMMU_PTE_PRESENT(__pte) &&
		    pte_level == PAGE_MODE_7_LEVEL) {
			unsigned long count, i;
			u64 *lpte;

			lpte = first_pte_l7(pte, NULL, &count);

			/*
			 * Unmap the replicated PTEs that still match the
			 * original large mapping
			 */
			for (i = 0; i < count; ++i)
				cmpxchg64(&lpte[i], __pte, 0ULL);

			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte) ||
		    pte_level == PAGE_MODE_NONE) {
			page = (u64 *)get_zeroed_page(gfp);

			if (!page)
				return NULL;

			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));

			/* pte could have been changed somewhere. */
			if (cmpxchg64(pte, __pte, __npte) != __pte)
				free_page((unsigned long)page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		/* No level skipping support yet */
		if (pte_level != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(__pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}
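
/*
 * Illustration: for a 2 MiB mapping, PAGE_SIZE_LEVEL(SZ_2M) == 1, so
 * the walk above stops at level 1 and returns a pointer to the single
 * level-1 entry that will map the whole 2 MiB region.
 */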

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
u64 *fetch_pte(struct protection_domain *domain,
	       unsigned long address,
	       unsigned long *page_size)
{
	struct domain_pgtable pgtable;
	int level;
	u64 *pte;

	*page_size = 0;

	amd_iommu_domain_get_pgtable(domain, &pgtable);

	if (address > PM_LEVEL_SIZE(pgtable.mode))
		return NULL;

	level      = pgtable.mode - 1;
	pte        = &pgtable.root[PM_LEVEL_INDEX(level, address)];
	*page_size = PTE_LEVEL_PAGE_SIZE(level);

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == 7 ||
		    PM_PTE_LEVEL(*pte) == 0)
			break;

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte        = IOMMU_PTE_PAGE(*pte);
		pte        = &pte[PM_LEVEL_INDEX(level, address)];
		*page_size = PTE_LEVEL_PAGE_SIZE(level);
	}

	/*
	 * If we have a series of large PTEs, make
	 * sure to return a pointer to the first one.
	 */
	if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
		pte = first_pte_l7(pte, page_size, NULL);

	return pte;
}
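
/*
 * Usage sketch (illustrative): resolving an IOVA to a physical
 * address, mirroring what an iova_to_phys() implementation would do:
 *
 *	unsigned long pgsize;
 *	u64 *pte = fetch_pte(domain, iova, &pgsize);
 *
 *	if (pte && IOMMU_PTE_PRESENT(*pte))
 *		paddr = __sme_clr(*pte & PM_ADDR_MASK) |
 *			(iova & (pgsize - 1));
 */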

static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
{
	unsigned long pt;
	int mode;

	while (cmpxchg64(pte, pteval, 0) != pteval) {
		/* pr_fmt already prepends "AMD-Vi: " */
		pr_warn("IOMMU pte changed since we read it\n");
		pteval = *pte;
	}

	if (!IOMMU_PTE_PRESENT(pteval))
		return freelist;

	pt   = (unsigned long)IOMMU_PTE_PAGE(pteval);
	mode = IOMMU_PTE_MODE(pteval);

	return free_sub_pt(pt, mode, freelist);
}

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space and allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
int iommu_map_page(struct protection_domain *dom,
		   unsigned long bus_addr,
		   unsigned long phys_addr,
		   unsigned long page_size,
		   int prot,
		   gfp_t gfp)
{
	struct page *freelist = NULL;
	bool updated = false;
	u64 __pte, *pte;
	int ret, i, count;

	BUG_ON(!IS_ALIGNED(bus_addr, page_size));
	BUG_ON(!IS_ALIGNED(phys_addr, page_size));

	ret = -EINVAL;
	if (!(prot & IOMMU_PROT_MASK))
		goto out;

	count = PAGE_SIZE_PTE_COUNT(page_size);
	pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);

	ret = -ENOMEM;
	if (!pte)
		goto out;

	for (i = 0; i < count; ++i)
		freelist = free_clear_pte(&pte[i], pte[i], freelist);

	if (freelist != NULL)
		updated = true;

	if (count > 1) {
		__pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
	} else
		__pte = __sme_set(phys_addr) | IOMMU_PTE_PR | IOMMU_PTE_FC;

	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	for (i = 0; i < count; ++i)
		pte[i] = __pte;

	ret = 0;

out:
	if (updated) {
		unsigned long flags;

		spin_lock_irqsave(&dom->lock, flags);
		/*
		 * Flush domain TLB(s) and wait for completion. Any Device-Table
		 * Updates and flushing already happened in
		 * increase_address_space().
		 */
		amd_iommu_domain_flush_tlb_pde(dom);
		amd_iommu_domain_flush_complete(dom);
		spin_unlock_irqrestore(&dom->lock, flags);
	}

	/* Everything flushed out, free pages now */
	free_page_list(freelist);

	return ret;
}
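
/*
 * Usage sketch (illustrative): map a 2 MiB region for DMA reads and
 * writes. SZ_2M comes from <linux/sizes.h>, included above.
 *
 *	ret = iommu_map_page(dom, iova, paddr, SZ_2M,
 *			     IOMMU_PROT_IR | IOMMU_PROT_IW, GFP_KERNEL);
 */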

unsigned long iommu_unmap_page(struct protection_domain *dom,
			       unsigned long bus_addr,
			       unsigned long page_size)
{
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;

	BUG_ON(!is_power_of_2(page_size));

	unmapped = 0;

	while (unmapped < page_size) {

		pte = fetch_pte(dom, bus_addr, &unmap_size);

		if (pte) {
			int i, count;

			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		}

		bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	BUG_ON(unmapped && !is_power_of_2(unmapped));

	return unmapped;
}
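
/*
 * Note that unmap_size reflects whatever page size is actually present
 * at each step, so a single call can tear down a region composed of
 * mixed page sizes; the return value is the total address range
 * walked, whether or not every step was mapped.
 */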

/*
 * ----------------------------------------------------
 */
static void v1_free_pgtable(struct io_pgtable *iop)
{
}

static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

	cfg->pgsize_bitmap  = AMD_IOMMU_PGSIZES;
	cfg->ias            = IOMMU_IN_ADDR_BIT_SIZE;
	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE;
	cfg->tlb            = &v1_flush_ops;

	return &pgtable->iop;
}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
	.alloc	= v1_alloc_pgtable,
	.free	= v1_free_pgtable,
};
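
/*
 * Lookup sketch (illustrative): the io-pgtable core selects these
 * callbacks by format, so a caller would typically obtain the ops
 * with something like:
 *
 *	ops = alloc_io_pgtable_ops(AMD_IOMMU_V1, &pgtable->pgtbl_cfg,
 *				   domain);
 */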