// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 */

#include "habanalabs.h"
#include "include/hw_ip/mmu/mmu_general.h"

#include <linux/genalloc.h>
#include <linux/slab.h>

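/*
 * Each page-table hop is kept in two copies: the real table in device memory,
 * allocated from the mmu_pgt_pool gen pool and written through the ASIC's
 * write_pte callback, and a host-resident "shadow" copy (kzalloc'ed) that the
 * driver reads when walking the tables, so PTE lookups never touch the device.
 * Shadow hops are tracked in the per-context mmu_shadow_hash and translated
 * back to device addresses by get_phys_addr().
 */
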
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);

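/* look up the pgt_info of a shadow hop address in the per-context hash */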
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = NULL;

	hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
				(unsigned long) hop_addr)
		if (hop_addr == pgt_info->shadow_addr)
			break;

	return pgt_info;
}

static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
	struct hl_device *hdev = ctx->hdev;

	gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
			hdev->asic_prop.mmu_hop_table_size);
	hash_del(&pgt_info->node);
	kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
	kfree(pgt_info);
}

static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);

	_free_hop(ctx, pgt_info);
}

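/*
 * Allocate a new hop: a physical page table from the device gen pool plus a
 * zeroed host shadow copy. Returns the shadow address of the new hop, or
 * ULLONG_MAX on any allocation failure.
 */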
static u64 alloc_hop(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pgt_info *pgt_info;
	u64 phys_addr, shadow_addr;

	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
	if (!pgt_info)
		return ULLONG_MAX;

	phys_addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
					prop->mmu_hop_table_size);
	if (!phys_addr) {
		dev_err(hdev->dev, "failed to allocate page\n");
		goto pool_add_err;
	}

	shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
						GFP_KERNEL);
	if (!shadow_addr)
		goto shadow_err;

	pgt_info->phys_addr = phys_addr;
	pgt_info->shadow_addr = shadow_addr;
	pgt_info->ctx = ctx;
	pgt_info->num_of_ptes = 0;
	hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);

	return shadow_addr;

shadow_err:
	gen_pool_free(hdev->mmu_pgt_pool, phys_addr, prop->mmu_hop_table_size);
pool_add_err:
	kfree(pgt_info);

	return ULLONG_MAX;
}

static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
{
	return ctx->hdev->asic_prop.mmu_pgt_addr +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
	return (u64) (uintptr_t) ctx->hdev->mmu_shadow_hop0 +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline void flush(struct hl_ctx *ctx)
{
	/* flush all writes from all cores to reach PCI */
	mb();
	ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
}

/* transform the value to physical address when writing to H/W */
static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
	/*
	 * The value to write is actually the address of the next shadow hop +
	 * flags at the 12 LSBs.
	 * Hence in order to get the value to write to the physical PTE, we
	 * clear the 12 LSBs and translate the shadow hop to its associated
	 * physical hop, and add back the original 12 LSBs.
	 */
	u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
				(val & FLAGS_MASK);

	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					phys_val);

	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* do not transform the value to physical address when writing to H/W */
static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
					u64 val)
{
	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					val);
	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* clear the last and present bits */
static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
	/* no need to transform the value to physical address */
	write_final_pte(ctx, pte_addr, 0);
}

static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}

/*
 * put_pte - decrement the num of ptes and free the hop if possible
 *
 * @ctx: pointer to the context structure
 * @hop_addr: addr of the hop
 *
 * This function returns the number of ptes left on this hop. If the number is
 * 0, it means the pte was freed.
 */
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
	int num_of_ptes_left;

	pgt_info->num_of_ptes--;

	/*
	 * Need to save the number of ptes left because free_hop might free
	 * the pgt_info
	 */
	num_of_ptes_left = pgt_info->num_of_ptes;
	if (!num_of_ptes_left)
		_free_hop(ctx, pgt_info);

	return num_of_ptes_left;
}

static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
					u64 virt_addr, u64 mask, u64 shift)
{
	return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
			((virt_addr & mask) >> shift);
}

static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
					mmu_prop->hop0_shift);
}

static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
					mmu_prop->hop1_shift);
}

static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
					mmu_prop->hop2_shift);
}

static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
					mmu_prop->hop3_shift);
}

static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
					mmu_prop->hop4_shift);
}

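/* extract the next hop (shadow) address from a PTE, or ULLONG_MAX if the PTE
 * is not present
 */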
static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
	if (curr_pte & PAGE_PRESENT_MASK)
		return curr_pte & HOP_PHYS_ADDR_MASK;
	else
		return ULLONG_MAX;
}

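/*
 * Like get_next_hop_addr(), but allocates a new hop when the PTE is not
 * present; *is_new_hop tells the caller whether an allocation happened.
 */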
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
						bool *is_new_hop)
{
	u64 hop_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop_addr == ULLONG_MAX) {
		hop_addr = alloc_hop(ctx);
		*is_new_hop = (hop_addr != ULLONG_MAX);
	}

	return hop_addr;
}

/* translates shadow address inside hop to a physical address */
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
	u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
	u64 shadow_hop_addr = shadow_addr & ~page_mask;
	u64 pte_offset = shadow_addr & page_mask;
	u64 phys_hop_addr;

	if (shadow_hop_addr != get_hop0_addr(ctx))
		phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
	else
		phys_hop_addr = get_phys_hop0_addr(ctx);

	return phys_hop_addr + pte_offset;
}

static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
					prop->dmmu.start_addr,
					prop->dmmu.end_addr);
}

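/*
 * Pre-map the whole DRAM VA range of the context to the default DRAM page
 * (mmu_dram_default_page_addr). Hop0 already exists per ASID; hop1, hop2 and
 * all hop3 tables are allocated here and released by
 * dram_default_mapping_fini().
 */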
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr, pte_val;
	int rc, i, j, hop3_allocated = 0;

	if ((!hdev->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return 0;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;

	ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
	if (!ctx->dram_default_hops)
		return -ENOMEM;

	hop0_addr = get_hop0_addr(ctx);

	hop1_addr = alloc_hop(ctx);
	if (hop1_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 1\n");
		rc = -ENOMEM;
		goto hop1_err;
	}

	ctx->dram_default_hops[total_hops - 1] = hop1_addr;

	hop2_addr = alloc_hop(ctx);
	if (hop2_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 2\n");
		rc = -ENOMEM;
		goto hop2_err;
	}

	ctx->dram_default_hops[total_hops - 2] = hop2_addr;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		ctx->dram_default_hops[i] = alloc_hop(ctx);
		if (ctx->dram_default_hops[i] == ULLONG_MAX) {
			dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
			rc = -ENOMEM;
			goto hop3_err;
		}
		hop3_allocated++;
	}

	/* need only pte 0 in hops 0 and 1 */
	pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop0_addr, pte_val);

	pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop1_addr, pte_val);
	get_pte(ctx, hop1_addr);

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
				PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, pte_val);
		get_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
			LAST_MASK | PAGE_PRESENT_MASK;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
			write_final_pte(ctx, hop3_pte_addr, pte_val);
			get_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	flush(ctx);

	return 0;

hop3_err:
	for (i = 0 ; i < hop3_allocated ; i++)
		free_hop(ctx, ctx->dram_default_hops[i]);

	free_hop(ctx, hop2_addr);
hop2_err:
	free_hop(ctx, hop1_addr);
hop1_err:
	kfree(ctx->dram_default_hops);

	return rc;
}

static void dram_default_mapping_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr;
	int i, j;

	if ((!hdev->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

	hop0_addr = get_hop0_addr(ctx);
	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;
	hop1_addr = ctx->dram_default_hops[total_hops - 1];
	hop2_addr = ctx->dram_default_hops[total_hops - 2];

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
			clear_pte(ctx, hop3_pte_addr);
			put_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		clear_pte(ctx, hop2_pte_addr);
		put_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	clear_pte(ctx, hop1_addr);
	put_pte(ctx, hop1_addr);
	clear_pte(ctx, hop0_addr);

	kfree(ctx->dram_default_hops);

	flush(ctx);
}

/**
 * hl_mmu_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt
 *
 * Return: 0 for success, non-zero for failure.
 */
int hl_mmu_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->mmu_enable)
		return 0;

	hdev->mmu_pgt_pool =
			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);

	if (!hdev->mmu_pgt_pool) {
		dev_err(hdev->dev, "Failed to create page gen pool\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
			prop->mmu_hop0_tables_total_size,
			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
			-1);
	if (rc) {
		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
		goto err_pool_add;
	}

	hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
					prop->mmu_hop_table_size,
					GFP_KERNEL | __GFP_ZERO);
	if (!hdev->mmu_shadow_hop0) {
		rc = -ENOMEM;
		goto err_pool_add;
	}

	/* MMU H/W init will be done in device hw_init() */

	return 0;

err_pool_add:
	gen_pool_destroy(hdev->mmu_pgt_pool);

	return rc;
}

/**
 * hl_mmu_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_fini(struct hl_device *hdev)
{
	if (!hdev->mmu_enable)
		return;

	/* MMU H/W fini was already done in device hw_fini() */

	kvfree(hdev->mmu_shadow_hop0);
	gen_pool_destroy(hdev->mmu_pgt_pool);
}

/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow and a hash to hold
 * all page table hops related to this context.
 *
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->mmu_enable)
		return 0;

	mutex_init(&ctx->mmu_lock);
	hash_init(ctx->mmu_shadow_hash);

	return dram_default_mapping_init(ctx);
}

/*
 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free the mutex
 * - Free DRAM default page mapping hops
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct pgt_info *pgt_info;
	struct hlist_node *tmp;
	int i;

	if (!hdev->mmu_enable)
		return;

	dram_default_mapping_fini(ctx);

	if (!hash_empty(ctx->mmu_shadow_hash))
		dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
			ctx->asid);

	hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
		dev_err_ratelimited(hdev->dev,
			"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
		_free_hop(ctx, pgt_info);
	}

	mutex_destroy(&ctx->mmu_lock);
}

static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop0_addr = 0, hop0_pte_addr = 0,
		hop1_addr = 0, hop1_pte_addr = 0,
		hop2_addr = 0, hop2_pte_addr = 0,
		hop3_addr = 0, hop3_pte_addr = 0,
		hop4_addr = 0, hop4_pte_addr = 0,
		curr_pte;
	bool is_huge, clear_hop3 = true;

	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

	hop1_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop1_addr == ULLONG_MAX)
		goto not_mapped;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

	hop2_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop2_addr == ULLONG_MAX)
		goto not_mapped;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

	hop3_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop3_addr == ULLONG_MAX)
		goto not_mapped;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

	is_huge = curr_pte & LAST_MASK;

	if (is_dram_addr && !is_huge) {
		dev_err(hdev->dev,
			"DRAM unmapping should use huge pages only\n");
		return -EFAULT;
	}

	if (!is_huge) {
		hop4_addr = get_next_hop_addr(ctx, curr_pte);

		if (hop4_addr == ULLONG_MAX)
			goto not_mapped;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
							virt_addr);

		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;

		clear_hop3 = false;
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
				HOP_PHYS_ADDR_MASK) | LAST_MASK |
					PAGE_PRESENT_MASK;
		if (curr_pte == default_pte) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		if (!(curr_pte & PAGE_PRESENT_MASK)) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		write_final_pte(ctx, hop3_pte_addr, default_pte);
		put_pte(ctx, hop3_addr);
	} else {
		if (!(curr_pte & PAGE_PRESENT_MASK))
			goto not_mapped;

		if (hop4_addr)
			clear_pte(ctx, hop4_pte_addr);
		else
			clear_pte(ctx, hop3_pte_addr);

		if (hop4_addr && !put_pte(ctx, hop4_addr))
			clear_hop3 = true;

		if (clear_hop3)
			clear_pte(ctx, hop3_pte_addr);

		if (put_pte(ctx, hop3_addr))
			goto mapped;

		clear_pte(ctx, hop2_pte_addr);

		if (put_pte(ctx, hop2_addr))
			goto mapped;

		clear_pte(ctx, hop1_pte_addr);

		if (put_pte(ctx, hop1_addr))
			goto mapped;

		clear_pte(ctx, hop0_pte_addr);
	}

mapped:
	return 0;

not_mapped:
	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
		virt_addr);

	return -EINVAL;
}

/*
 * hl_mmu_unmap - unmaps a virtual addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @page_size: size of the page to unmap
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is mapped
 * - Unmap the virt addr and free pgts if possible
 * - Returns 0 on success, -EINVAL if the given addr is not mapped
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it unmaps only a single page, the lock should be
 * implemented in a higher level in order to protect the entire unmapping of
 * the memory area.
 *
 * For optimization reasons PCI flush may be requested once after unmapping of
 * a large area.
 */
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
		bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr;
	u32 real_page_size, npages;
	int i, rc = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = is_dram_va(hdev, virt_addr);

	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and unmap them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB aligned, can't unmap\n",
			page_size, mmu_prop->page_size >> 10);

		return -EFAULT;
	}

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = _hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr);
		if (rc)
			break;

		real_virt_addr += real_page_size;
	}

	if (flush_pte)
		flush(ctx);

	return rc;
}

static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
			u32 page_size, bool is_dram_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop0_addr = 0, hop0_pte_addr = 0,
		hop1_addr = 0, hop1_pte_addr = 0,
		hop2_addr = 0, hop2_pte_addr = 0,
		hop3_addr = 0, hop3_pte_addr = 0,
		hop4_addr = 0, hop4_pte_addr = 0,
		curr_pte = 0;
	bool hop1_new = false, hop2_new = false, hop3_new = false,
		hop4_new = false, is_huge;
	int rc = -ENOMEM;

	/*
	 * This mapping function can map a page or a huge page. For huge page
	 * there are only 3 hops rather than 4. Currently the DRAM allocation
	 * uses huge pages only but user memory could have been allocated with
	 * one of the two page sizes. Since this is a common code for all the
	 * three cases, we need this huge page check.
	 */
	if (is_dram_addr) {
		mmu_prop = &prop->dmmu;
		is_huge = true;
	} else if (page_size == prop->pmmu_huge.page_size) {
		mmu_prop = &prop->pmmu_huge;
		is_huge = true;
	} else {
		mmu_prop = &prop->pmmu;
		is_huge = false;
	}

	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

	hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
	if (hop1_addr == ULLONG_MAX)
		goto err;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

	hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
	if (hop2_addr == ULLONG_MAX)
		goto err;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

	hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
	if (hop3_addr == ULLONG_MAX)
		goto err;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

	if (!is_huge) {
		hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
		if (hop4_addr == ULLONG_MAX)
			goto err;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
							virt_addr);
		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
					HOP_PHYS_ADDR_MASK) | LAST_MASK |
						PAGE_PRESENT_MASK;

		if (curr_pte != default_pte) {
			dev_err(hdev->dev,
				"DRAM: mapping already exists for virt_addr 0x%llx\n",
					virt_addr);
			rc = -EINVAL;
			goto err;
		}

		if (hop1_new || hop2_new || hop3_new || hop4_new) {
			dev_err(hdev->dev,
				"DRAM mapping should not allocate more hops\n");
			rc = -EFAULT;
			goto err;
		}
	} else if (curr_pte & PAGE_PRESENT_MASK) {
		dev_err(hdev->dev,
			"mapping already exists for virt_addr 0x%llx\n",
				virt_addr);

		dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
		dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
		dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
		dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);

		if (!is_huge)
			dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
				*(u64 *) (uintptr_t) hop4_pte_addr,
				hop4_pte_addr);

		rc = -EINVAL;
		goto err;
	}

	curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
			| PAGE_PRESENT_MASK;

	if (is_huge)
		write_final_pte(ctx, hop3_pte_addr, curr_pte);
	else
		write_final_pte(ctx, hop4_pte_addr, curr_pte);

	if (hop1_new) {
		curr_pte =
			(hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop0_pte_addr, curr_pte);
	}
	if (hop2_new) {
		curr_pte =
			(hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop1_pte_addr, curr_pte);
		get_pte(ctx, hop1_addr);
	}
	if (hop3_new) {
		curr_pte =
			(hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, curr_pte);
		get_pte(ctx, hop2_addr);
	}

	if (!is_huge) {
		if (hop4_new) {
			curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
					PAGE_PRESENT_MASK;
			write_pte(ctx, hop3_pte_addr, curr_pte);
			get_pte(ctx, hop3_addr);
		}

		get_pte(ctx, hop4_addr);
	} else {
		get_pte(ctx, hop3_addr);
	}

	return 0;

err:
	if (hop4_new)
		free_hop(ctx, hop4_addr);
	if (hop3_new)
		free_hop(ctx, hop3_addr);
	if (hop2_new)
		free_hop(ctx, hop2_addr);
	if (hop1_new)
		free_hop(ctx, hop1_addr);

	return rc;
}

/*
 * hl_mmu_map - maps a virtual addr to physical addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @page_size: physical page size
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is not mapped
 * - Allocate pgts as necessary in order to map the virt addr to the phys addr
 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * in a higher level in order to protect the entire mapping of the memory area.
 *
 * For optimization reasons PCI flush may be requested once after mapping of
 * a large area.
 */
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
		bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr, real_phys_addr;
	u32 real_page_size, npages;
	int i, rc, mapped_cnt = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = is_dram_va(hdev, virt_addr);

	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and map them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB aligned, can't map\n",
			page_size, mmu_prop->page_size >> 10);

		return -EFAULT;
	}

	WARN_ONCE((phys_addr & (real_page_size - 1)),
		"Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
		phys_addr, real_page_size);

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;
	real_phys_addr = phys_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
				real_page_size, is_dram_addr);
		if (rc)
			goto err;

		real_virt_addr += real_page_size;
		real_phys_addr += real_page_size;
		mapped_cnt++;
	}

	if (flush_pte)
		flush(ctx);

	return 0;

err:
	real_virt_addr = virt_addr;
	for (i = 0 ; i < mapped_cnt ; i++) {
		if (_hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap va: 0x%llx\n", real_virt_addr);

		real_virt_addr += real_page_size;
	}

	flush(ctx);

	return rc;
}

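/*
 * Illustrative usage sketch (not part of the driver) for the map API above,
 * following the locking and flush notes in the comments: the caller holds
 * ctx->mmu_lock around the whole area and asks for a single PCI flush on the
 * last page only. Names such as "va", "pa" and "num_pages" are hypothetical.
 *
 *	mutex_lock(&ctx->mmu_lock);
 *	for (i = 0 ; i < num_pages ; i++) {
 *		rc = hl_mmu_map(ctx, va + i * page_size, pa + i * page_size,
 *				page_size, (i == num_pages - 1));
 *		if (rc)
 *			break;
 *	}
 *	mutex_unlock(&ctx->mmu_lock);
 */
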
/*
 * hl_mmu_swap_out - marks all mapping of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_out(struct hl_ctx *ctx)
{

}

/*
 * hl_mmu_swap_in - marks all mapping of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_in(struct hl_ctx *ctx)
{

}