// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <linux/slab.h>

#include "../habanalabs.h"

#include <trace/events/habanalabs.h>
/**
 * hl_mmu_get_funcs() - get MMU functions structure
 * @hdev: habanalabs device structure.
 * @pgt_residency: page table residency.
 * @is_dram_addr: true if we need HMMU functions
 *
 * @return appropriate MMU functions structure
 */
static struct hl_mmu_funcs *hl_mmu_get_funcs(struct hl_device *hdev, int pgt_residency,
						bool is_dram_addr)
{
	return &hdev->mmu_func[pgt_residency];
}
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
					prop->dmmu.start_addr,
					prop->dmmu.end_addr);
}
/**
 * hl_mmu_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * Return: 0 for success, non-zero for failure.
 */
int hl_mmu_init(struct hl_device *hdev)
{
	int rc = -EOPNOTSUPP;

	if (!hdev->mmu_enable)
		return 0;

	mutex_init(&hdev->mmu_lock);

	if (hdev->mmu_func[MMU_DR_PGT].init != NULL) {
		rc = hdev->mmu_func[MMU_DR_PGT].init(hdev);
		if (rc)
			return rc;
	}

	if (hdev->mmu_func[MMU_HR_PGT].init != NULL) {
		rc = hdev->mmu_func[MMU_HR_PGT].init(hdev);
		if (rc)
			goto fini_dr_mmu;
	}

	return 0;

fini_dr_mmu:
	if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
		hdev->mmu_func[MMU_DR_PGT].fini(hdev);

	return rc;
}
/**
 * hl_mmu_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_fini(struct hl_device *hdev)
{
	if (!hdev->mmu_enable)
		return;

	if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
		hdev->mmu_func[MMU_DR_PGT].fini(hdev);

	if (hdev->mmu_func[MMU_HR_PGT].fini != NULL)
		hdev->mmu_func[MMU_HR_PGT].fini(hdev);

	mutex_destroy(&hdev->mmu_lock);
}
/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow, and a hash to
 * hold all page table hops related to this context.
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int rc = -EOPNOTSUPP;

	if (!hdev->mmu_enable)
		return 0;

	if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) {
		rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx);
		if (rc)
			return rc;
	}

	if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL) {
		rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx);
		if (rc)
			goto fini_dr_ctx;
	}

	return 0;

fini_dr_ctx:
	if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
		hdev->mmu_func[MMU_DR_PGT].fini(hdev);

	return rc;
}
/*
 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free the mutex
 * - Free DRAM default page mapping hops
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->mmu_enable)
		return;

	if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL)
		hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx);

	if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL)
		hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx);
}
/*
 * hl_mmu_get_real_page_size - get real page size to use in map/unmap operation
 *
 * @hdev: pointer to device data.
 * @mmu_prop: MMU properties.
 * @page_size: page size
 * @real_page_size: set here the actual page size to use for the operation
 * @is_dram_addr: true if DRAM address, otherwise false.
 *
 * @return 0 on success, otherwise non 0 error code
 *
 * Note that this is a general implementation that can fit most MMU
 * architectures. However, as this is used as an MMU function:
 * 1. it shall not be called directly - only via the mmu_func structure instance
 * 2. each MMU may modify the implementation internally
 */
int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
				u32 page_size, u32 *real_page_size, bool is_dram_addr)
{
	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and map them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		*real_page_size = mmu_prop->page_size;
		return 0;
	}

	dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
		page_size, mmu_prop->page_size >> 10);

	return -EFAULT;
}
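/*
 * Illustrative example (numbers assumed, not from the driver): mapping a
 * 64KB chunk when mmu_prop->page_size is 4KB sets *real_page_size to 4KB,
 * and callers such as hl_mmu_map_page() then perform 64KB / 4KB = 16
 * separate sub-page map operations.
 */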
static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size,
							bool is_dram_addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (is_dram_addr)
		return &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		return &prop->pmmu_huge;

	return &prop->pmmu;
}
/*
 * hl_mmu_unmap_page - unmaps a virtual addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @page_size: size of the page to unmap
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is mapped
 * - Unmap the virt addr and free pgts if possible
 * - Returns 0 on success, -EINVAL if the given addr is not mapped
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it unmaps only a single page, the lock should be implemented
 * at a higher level in order to protect the entire unmapping of the memory area.
 *
 * For optimization reasons PCI flush may be requested once after unmapping of
 * a large area.
 */
int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_mmu_properties *mmu_prop;
	struct hl_mmu_funcs *mmu_funcs;
	int i, pgt_residency, rc = 0;
	u32 real_page_size, npages;
	u64 real_virt_addr;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = hl_is_dram_va(hdev, virt_addr);
	mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);

	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
	mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);

	rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
							is_dram_addr);
	if (rc)
		return rc;

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr);
		if (rc)
			break;

		real_virt_addr += real_page_size;
	}

	if (flush_pte)
		mmu_funcs->flush(ctx);

	if (trace_habanalabs_mmu_unmap_enabled() && !rc)
		trace_habanalabs_mmu_unmap(hdev->dev, virt_addr, 0, page_size, flush_pte);

	return rc;
}
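/*
 * Illustrative note: unmapping a 2MB region backed by 4KB MMU pages issues
 * 512 unmap operations in the loop above. A caller tearing down several such
 * regions back-to-back may pass flush_pte = false for all but the last call,
 * paying for a single flush (see the comment above hl_mmu_unmap_page()).
 */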
/*
 * hl_mmu_map_page - maps a virtual addr to physical addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @page_size: physical page size
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is not mapped
 * - Allocate pgts as necessary in order to map the virt addr to the phys
 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * at a higher level in order to protect the entire mapping of the memory area.
 *
 * For optimization reasons PCI flush may be requested once after mapping of
 * a large area.
 */
int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
			bool flush_pte)
{
	int i, rc, pgt_residency, mapped_cnt = 0;
	struct hl_device *hdev = ctx->hdev;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr, real_phys_addr;
	struct hl_mmu_funcs *mmu_funcs;
	u32 real_page_size, npages;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = hl_is_dram_va(hdev, virt_addr);
	mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);

	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
	mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);

	rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
							is_dram_addr);
	if (rc)
		return rc;

	/*
	 * Verify that the phys and virt addresses are aligned with the
	 * MMU page size (in dram this means checking the address and MMU
	 * page size)
	 */
	if ((is_dram_addr &&
			((hdev->asic_funcs->scramble_addr(hdev, phys_addr) &
				(mmu_prop->page_size - 1)) ||
			(hdev->asic_funcs->scramble_addr(hdev, virt_addr) &
				(mmu_prop->page_size - 1)))) ||
		(!is_dram_addr && ((phys_addr & (real_page_size - 1)) ||
				(virt_addr & (real_page_size - 1)))))
		dev_crit(hdev->dev,
			"Mapping address 0x%llx with virtual address 0x%llx and page size of 0x%x is erroneous! Addresses must be divisible by page size",
			phys_addr, virt_addr, real_page_size);

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;
	real_phys_addr = phys_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = mmu_funcs->map(ctx, real_virt_addr, real_phys_addr, real_page_size,
									is_dram_addr);
		if (rc)
			goto err;

		real_virt_addr += real_page_size;
		real_phys_addr += real_page_size;
		mapped_cnt++;
	}

	if (flush_pte)
		mmu_funcs->flush(ctx);

	trace_habanalabs_mmu_map(hdev->dev, virt_addr, phys_addr, page_size, flush_pte);

	return 0;

err:
	real_virt_addr = virt_addr;
	for (i = 0 ; i < mapped_cnt ; i++) {
		if (mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap va: 0x%llx\n", real_virt_addr);

		real_virt_addr += real_page_size;
	}

	mmu_funcs->flush(ctx);

	return rc;
}
/*
 * hl_mmu_map_contiguous - implements a wrapper for hl_mmu_map_page
 *                         for mapping contiguous physical memory
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @size: size to map
 *
 */
int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
					u64 phys_addr, u32 size)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 curr_va, curr_pa;
	u32 page_size;
	bool flush_pte;
	int rc = 0, off;

	if (hl_mem_area_inside_range(virt_addr, size,
			prop->dmmu.start_addr, prop->dmmu.end_addr))
		page_size = prop->dmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu.start_addr, prop->pmmu.end_addr))
		page_size = prop->pmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
		page_size = prop->pmmu_huge.page_size;
	else
		return -EINVAL;

	for (off = 0 ; off < size ; off += page_size) {
		curr_va = virt_addr + off;
		curr_pa = phys_addr + off;
		flush_pte = (off + page_size) >= size;
		rc = hl_mmu_map_page(ctx, curr_va, curr_pa, page_size,
								flush_pte);
		if (rc) {
			dev_err(hdev->dev,
				"Map failed for va 0x%llx to pa 0x%llx\n",
				curr_va, curr_pa);
			/* last mapping failed so don't try to unmap it - reduce off by page_size */
			off -= page_size;
			goto unmap;
		}
	}

	return 0;

unmap:
	for (; off >= 0 ; off -= page_size) {
		curr_va = virt_addr + off;
		flush_pte = (off - (s32) page_size) < 0;
		if (hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte))
			dev_warn_ratelimited(hdev->dev,
					"failed to unmap va 0x%llx\n", curr_va);
	}

	return rc;
}
/*
 * hl_mmu_unmap_contiguous - implements a wrapper for hl_mmu_unmap_page
 *                           for unmapping contiguous physical memory
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @size: size to unmap
 *
 */
int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 curr_va;
	u32 page_size;
	bool flush_pte;
	int rc = 0, off;

	if (hl_mem_area_inside_range(virt_addr, size,
			prop->dmmu.start_addr, prop->dmmu.end_addr))
		page_size = prop->dmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu.start_addr, prop->pmmu.end_addr))
		page_size = prop->pmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
		page_size = prop->pmmu_huge.page_size;
	else
		return -EINVAL;

	for (off = 0 ; off < size ; off += page_size) {
		curr_va = virt_addr + off;
		flush_pte = (off + page_size) >= size;
		rc = hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte);
		if (rc)
			dev_warn_ratelimited(hdev->dev,
					"Unmap failed for va 0x%llx\n", curr_va);
	}

	return rc;
}
/*
 * hl_mmu_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_out(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->mmu_enable)
		return;

	if (hdev->mmu_func[MMU_DR_PGT].swap_out != NULL)
		hdev->mmu_func[MMU_DR_PGT].swap_out(ctx);

	if (hdev->mmu_func[MMU_HR_PGT].swap_out != NULL)
		hdev->mmu_func[MMU_HR_PGT].swap_out(ctx);
}

/*
 * hl_mmu_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_in(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->mmu_enable)
		return;

	if (hdev->mmu_func[MMU_DR_PGT].swap_in != NULL)
		hdev->mmu_func[MMU_DR_PGT].swap_in(ctx);

	if (hdev->mmu_func[MMU_HR_PGT].swap_in != NULL)
		hdev->mmu_func[MMU_HR_PGT].swap_in(ctx);
}
static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
					struct hl_mmu_hop_info *hops,
					u64 *phys_addr)
{
	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
	u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr;
	struct hl_mmu_properties *mmu_prop;

	/* last hop holds the phys address and flags */
	if (hops->unscrambled_paddr)
		tmp_phys_addr = hops->unscrambled_paddr;
	else
		tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val;

	if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE)
		mmu_prop = &prop->pmmu_huge;
	else if (hops->range_type == HL_VA_RANGE_TYPE_HOST)
		mmu_prop = &prop->pmmu;
	else /* HL_VA_RANGE_TYPE_DRAM */
		mmu_prop = &prop->dmmu;

	if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
			!is_power_of_2(prop->dram_page_size)) {
		u64 dram_page_size, dram_base, abs_phys_addr, abs_virt_addr,
			page_id, page_start;
		u32 page_off;

		/*
		 * Bit arithmetic cannot be used for non power of two page
		 * sizes. In addition, since bit arithmetic is not used, the
		 * DRAM base cannot be ignored. All of this must be taken
		 * into account.
		 */
		dram_page_size = prop->dram_page_size;
		dram_base = prop->dram_base_address;
		abs_phys_addr = tmp_phys_addr - dram_base;
		abs_virt_addr = virt_addr - dram_base;
		page_id = DIV_ROUND_DOWN_ULL(abs_phys_addr, dram_page_size);
		page_start = page_id * dram_page_size;
		div_u64_rem(abs_virt_addr, dram_page_size, &page_off);

		*phys_addr = page_start + page_off + dram_base;
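		/*
		 * Worked example with assumed (illustrative) numbers: for a
		 * non-power-of-2 dram_page_size of 6MB and a tmp_phys_addr
		 * 13MB above dram_base, page_id = 13MB / 6MB = 2, so
		 * page_start = 12MB; a virt_addr that sits 0x123 bytes into
		 * its own 6MB page gives page_off = 0x123, hence
		 * *phys_addr = dram_base + 12MB + 0x123.
		 */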
	} else {
		/*
		 * find the correct hop shift field in hl_mmu_properties
		 * structure in order to determine the right masks
		 * for the page offset.
		 */
		hop_shift = mmu_prop->hop_shifts[hops->used_hops - 1];
		offset_mask = (1ull << hop_shift) - 1;
		addr_mask = ~(offset_mask);
		*phys_addr = (tmp_phys_addr & addr_mask) |
				(virt_addr & offset_mask);
	}
}
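/*
 * Worked example for the power-of-2 path (values assumed for illustration):
 * with a 4KB last-hop page, hop_shift is 12, so offset_mask = 0xFFF and
 * addr_mask = ~0xFFF; a last-hop PTE value of 0xABCD5ABC and virt_addr
 * 0x10123 combine to *phys_addr = 0xABCD5000 | 0x123 = 0xABCD5123.
 */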
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr)
{
	struct hl_mmu_hop_info hops;
	int rc;

	memset(&hops, 0, sizeof(hops));

	rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops);
	if (rc)
		return rc;

	hl_mmu_pa_page_with_offset(ctx, virt_addr, &hops, phys_addr);

	return 0;
}
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
			struct hl_mmu_hop_info *hops)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop;
	struct hl_mmu_properties *mmu_prop;
	struct hl_mmu_funcs *mmu_funcs;
	int pgt_residency, rc;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return -EOPNOTSUPP;

	prop = &hdev->asic_prop;
	hops->scrambled_vaddr = virt_addr;	/* assume no scrambling */

	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
						prop->dmmu.start_addr,
						prop->dmmu.end_addr);

	/* host-residency is the same in PMMU and PMMU huge, no need to distinguish here */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
	mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);

	mutex_lock(&hdev->mmu_lock);
	rc = mmu_funcs->get_tlb_info(ctx, virt_addr, hops);
	mutex_unlock(&hdev->mmu_lock);

	if (rc)
		return rc;

	/* add page offset to physical address */
	if (hops->unscrambled_paddr)
		hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, &hops->unscrambled_paddr);

	return 0;
}
int hl_mmu_if_set_funcs(struct hl_device *hdev)
{
	if (!hdev->mmu_enable)
		return 0;

	switch (hdev->asic_type) {
	case ASIC_GOYA:
	case ASIC_GAUDI:
	case ASIC_GAUDI_SEC:
		hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
		break;
	case ASIC_GAUDI2:
	case ASIC_GAUDI2_SEC:
		/* MMUs in Gaudi2 are always host resident */
		hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
		break;
	default:
		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
			hdev->asic_type);
		return -EOPNOTSUPP;
	}

	return 0;
}
/**
 * hl_mmu_scramble_addr() - The generic mmu address scrambling routine.
 * @hdev: pointer to device data.
 * @addr: The address to scramble.
 *
 * Return: The scrambled address.
 */
u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr)
{
	return addr;
}

/**
 * hl_mmu_descramble_addr() - The generic mmu address descrambling routine.
 * @hdev: pointer to device data.
 * @addr: The address to descramble.
 *
 * Return: The un-scrambled address.
 */
u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr)
{
	return addr;
}
int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
{
	int rc;

	rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
	if (rc)
		dev_err_ratelimited(hdev->dev,
				"%s cache invalidation failed, rc=%d\n",
				flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", rc);

	return rc;
}

int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
					u32 flags, u32 asid, u64 va, u64 size)
{
	int rc;

	rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, is_hard, flags,
								asid, va, size);
	if (rc)
		dev_err_ratelimited(hdev->dev,
				"%s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
				flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", va, size, rc);

	return rc;
}
static void hl_mmu_prefetch_work_function(struct work_struct *work)
{
	struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, prefetch_work);
	struct hl_ctx *ctx = pfw->ctx;
	struct hl_device *hdev = ctx->hdev;

	if (!hl_device_operational(hdev, NULL))
		goto put_ctx;

	mutex_lock(&hdev->mmu_lock);

	hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid, pfw->va, pfw->size);

	mutex_unlock(&hdev->mmu_lock);

put_ctx:
	/*
	 * context was taken in the common mmu prefetch function - see comment there about
	 * context handling.
	 */
	hl_ctx_put(ctx);
	kfree(pfw);
}

int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
{
	struct hl_prefetch_work *handle_prefetch_work;

	handle_prefetch_work = kmalloc(sizeof(*handle_prefetch_work), GFP_KERNEL);
	if (!handle_prefetch_work)
		return -ENOMEM;

	INIT_WORK(&handle_prefetch_work->prefetch_work, hl_mmu_prefetch_work_function);
	handle_prefetch_work->ctx = ctx;
	handle_prefetch_work->va = va;
	handle_prefetch_work->size = size;
	handle_prefetch_work->flags = flags;
	handle_prefetch_work->asid = asid;

	/*
	 * as actual prefetch is done in a WQ we must get the context (and put it
	 * at the end of the work function)
	 */
	hl_ctx_get(ctx);
	queue_work(ctx->hdev->prefetch_wq, &handle_prefetch_work->prefetch_work);

	return 0;
}
u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
	return (curr_pte & PAGE_PRESENT_MASK) ? (curr_pte & HOP_PHYS_ADDR_MASK) : ULLONG_MAX;
}
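/*
 * E.g. (assuming PAGE_PRESENT_MASK covers bit 0 and HOP_PHYS_ADDR_MASK clears
 * the low flag bits): a PTE value of 0xABCD5003 is present and yields a
 * next-hop address of 0xABCD5000, while a cleared PTE yields ULLONG_MAX.
 */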
/**
 * hl_mmu_get_hop_pte_phys_addr() - extract PTE address from HOP
 * @ctx: pointer to the context structure.
 * @mmu_prop: MMU properties.
 * @hop_idx: HOP index.
 * @hop_addr: HOP address.
 * @virt_addr: virtual address for the translation.
 *
 * @return the PTE physical address on success, otherwise U64_MAX.
 */
u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
					u8 hop_idx, u64 hop_addr, u64 virt_addr)
{
	u64 mask, shift;

	if (hop_idx >= mmu_prop->num_hops) {
		dev_err_ratelimited(ctx->hdev->dev, "Invalid hop index %d\n", hop_idx);
		return U64_MAX;
	}

	shift = mmu_prop->hop_shifts[hop_idx];
	mask = mmu_prop->hop_masks[hop_idx];

	return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
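/*
 * Illustrative arithmetic (values assumed): with an 8-byte PTE, a hop mask of
 * 0x1FF000 and a shift of 12, virt_addr 0x5000 selects PTE index
 * (0x5000 & 0x1FF000) >> 12 = 5, so the PTE lives at hop_addr + 8 * 5.
 */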
static void mmu_dma_mem_free_from_chunk(struct gen_pool *pool,
					struct gen_pool_chunk *chunk,
					void *data)
{
	struct hl_device *hdev = data;

	hl_asic_dma_free_coherent(hdev, (chunk->end_addr - chunk->start_addr) + 1,
					(void *)chunk->start_addr, chunk->phys_addr);
}

void hl_mmu_hr_flush(struct hl_ctx *ctx)
{
	/* a flush operation requires memory barrier */
	mb();
}
/**
 * hl_mmu_hr_pool_destroy() - destroy genpool
 * @hdev: habanalabs device structure.
 * @hr_priv: MMU HR private data.
 * @hop_table_size: HOP table size.
 *
 * This function does the following:
 * - free entries allocated for shadow HOP0
 * - free pool chunks
 * - free pool
 */
static void hl_mmu_hr_pool_destroy(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv,
					u32 hop_table_size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct gen_pool **pool = &hr_priv->mmu_pgt_pool;
	struct pgt_info *hop0_pgt;
	int asid;

	if (ZERO_OR_NULL_PTR(*pool))
		return;

	/* Free the Fixed allocation of HOPs0 */
	if (hr_priv->mmu_asid_hop0) {
		for (asid = 0 ; asid < prop->max_asid ; asid++) {
			hop0_pgt = &hr_priv->mmu_asid_hop0[asid];
			if (ZERO_OR_NULL_PTR(hop0_pgt->virt_addr))
				continue;

			gen_pool_free(*pool, (uintptr_t) hop0_pgt->virt_addr, hop_table_size);
		}
	}

	gen_pool_for_each_chunk(*pool, mmu_dma_mem_free_from_chunk, hdev);
	gen_pool_destroy(*pool);

	/* Make sure that if we arrive here again without init being called we
	 * won't cause a kernel panic. This can happen for example if we fail
	 * during hard reset code at certain points
	 */
	*pool = NULL;
}
/**
 * hl_mmu_hr_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 * @hr_priv: MMU HR private data.
 * @hop_table_size: HOP table size.
 * @pgt_size: memory size allocated for the page table
 *
 * @return 0 on success otherwise non-zero error code
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt
 */
int hl_mmu_hr_init(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size,
			u64 pgt_size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	size_t pool_chunk_size = SZ_4M;
	struct pgt_info *hop0_pgt;
	dma_addr_t dma_addr;
	u64 virt_addr;
	int i, rc;

	/*
	 * we set alloc size as PAGE_SIZE (since dma_alloc_coherent allocation order/size is
	 * PAGE_SHIFT/PAGE_SIZE) in order to be able to control the allocations alignment.
	 * This way we can call "DMA alloc align" according to dma_alloc granularity and supply
	 * allocations with higher-order alignment restrictions
	 */
	hr_priv->mmu_pgt_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (ZERO_OR_NULL_PTR(hr_priv->mmu_pgt_pool)) {
		dev_err(hdev->dev, "Failed to create hr page pool\n");
		return -ENOMEM;
	}

	hr_priv->mmu_asid_hop0 = kvcalloc(prop->max_asid, sizeof(struct pgt_info), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
		dev_err(hdev->dev, "Failed to allocate hr-mmu hop0 table\n");
		rc = -ENOMEM;
		goto destroy_mmu_pgt_pool;
	}

	for (i = 0 ; i < pgt_size ; i += pool_chunk_size) {
		virt_addr = (uintptr_t) hl_asic_dma_alloc_coherent(hdev, pool_chunk_size,
									&dma_addr,
									GFP_KERNEL | __GFP_ZERO);
		if (ZERO_OR_NULL_PTR(virt_addr)) {
			dev_err(hdev->dev,
				"Failed to allocate memory for host-resident page pool\n");
			rc = -ENOMEM;
			goto destroy_mmu_pgt_pool;
		}

		rc = gen_pool_add_virt(hr_priv->mmu_pgt_pool, virt_addr, (phys_addr_t) dma_addr,
						pool_chunk_size, -1);
		if (rc) {
			dev_err(hdev->dev, "Failed to fill host-resident page pool\n");
			goto destroy_mmu_pgt_pool;
		}
	}

	for (i = 0 ; i < prop->max_asid ; i++) {
		hop0_pgt = &hr_priv->mmu_asid_hop0[i];
		hop0_pgt->virt_addr = (uintptr_t)
					gen_pool_dma_zalloc_align(hr_priv->mmu_pgt_pool,
								hop_table_size,
								(dma_addr_t *) &hop0_pgt->phys_addr,
								hop_table_size);
		if (!hop0_pgt->virt_addr) {
			dev_err(hdev->dev, "Failed to allocate HOP from pgt pool\n");
			rc = -ENOMEM;
			goto destroy_mmu_pgt_pool;
		}
	}

	/* MMU H/W init will be done in device hw_init() */

	return 0;

destroy_mmu_pgt_pool:
	hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);
	if (!ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0))
		kvfree(hr_priv->mmu_asid_hop0);

	return rc;
}
/**
 * hl_mmu_hr_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 * @hr_priv: MMU host resident private info.
 * @hop_table_size: HOP table size
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_hr_fini(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size)
{
	/* MMU H/W fini was already done in device hw_fini() */

	hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);

	if (!ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
		kvfree(hr_priv->mmu_asid_hop0);

		/* Make sure that if we arrive here again without init being
		 * called we won't cause a kernel panic. This can happen for
		 * example if we fail during hard reset code at certain points
		 */
		hr_priv->mmu_asid_hop0 = NULL;
	}
}
/**
 * hl_mmu_hr_free_hop_remove_pgt() - free HOP and remove PGT from hash
 * @pgt_info: page table info structure.
 * @hr_priv: MMU HR private data.
 * @hop_table_size: HOP table size.
 */
void hl_mmu_hr_free_hop_remove_pgt(struct pgt_info *pgt_info, struct hl_mmu_hr_priv *hr_priv,
					u32 hop_table_size)
{
	gen_pool_free(hr_priv->mmu_pgt_pool, pgt_info->virt_addr, hop_table_size);
	hash_del(&pgt_info->node);
	kfree(pgt_info);
}
/**
 * hl_mmu_hr_pte_phys_to_virt() - translate PTE phys addr to virt addr
 * @ctx: pointer to the context structure
 * @pgt: pgt_info for the HOP hosting the PTE
 * @phys_pte_addr: phys address of the PTE
 * @hop_table_size: HOP table size
 *
 * @return PTE virtual address
 *
 * The function uses the pgt_info to get the HOP base virt addr and obtains the
 * PTE's virt addr by adding the PTE offset.
 */
u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt,
						u64 phys_pte_addr, u32 hop_table_size)
{
	u64 page_mask = (hop_table_size - 1);
	u64 pte_offset = phys_pte_addr & page_mask;

	return pgt->virt_addr + pte_offset;
}
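/*
 * E.g. (values assumed): with hop_table_size = 4KB, page_mask = 0xFFF, so a
 * phys_pte_addr of 0x80000028 contributes only the 0x28 offset, which is then
 * added to the HOP's kernel virtual base address in the shadow table.
 */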
/**
 * hl_mmu_hr_write_pte() - write HR PTE
 * @ctx: pointer to the context structure
 * @pgt_info: HOP's page table info structure
 * @phys_pte_addr: phys PTE address
 * @val: value to write
 * @hop_table_size: HOP table size
 */
void hl_mmu_hr_write_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
						u64 val, u32 hop_table_size)
{
	/*
	 * The value to write is the phys address of the next hop +
	 * flags at the 12 LSBs.
	 */
	u64 virt_addr = hl_mmu_hr_pte_phys_to_virt(ctx, pgt_info, phys_pte_addr, hop_table_size);

	*((u64 *) (uintptr_t) virt_addr) = val;
}
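/*
 * For illustration (value assumed): writing val = 0xABCD5003, i.e. the next
 * hop's physical address 0xABCD5000 plus flags in the 12 LSBs, is a single
 * 64-bit store into the host-resident page table.
 */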
/**
 * hl_mmu_hr_clear_pte() - clear HR PTE
 * @ctx: pointer to the context structure
 * @pgt_info: HOP's page table info structure
 * @phys_pte_addr: phys PTE address
 * @hop_table_size: HOP table size
 */
void hl_mmu_hr_clear_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
						u32 hop_table_size)
{
	/* no need to transform the value to physical address */
	hl_mmu_hr_write_pte(ctx, pgt_info, phys_pte_addr, 0, hop_table_size);
}
/**
 * hl_mmu_hr_put_pte() - put HR PTE and remove it if necessary (no more PTEs)
 * @ctx: pointer to the context structure
 * @pgt_info: HOP's page table info structure
 * @hr_priv: HR MMU private info
 * @hop_table_size: HOP table size
 *
 * @return number of PTEs still in the HOP
 */
int hl_mmu_hr_put_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info,
					struct hl_mmu_hr_priv *hr_priv,
					u32 hop_table_size)
{
	int num_of_ptes_left;

	pgt_info->num_of_ptes--;

	/*
	 * Need to save the number of ptes left because free_hop might free
	 * the pgt_info
	 */
	num_of_ptes_left = pgt_info->num_of_ptes;
	if (!num_of_ptes_left)
		hl_mmu_hr_free_hop_remove_pgt(pgt_info, hr_priv, hop_table_size);

	return num_of_ptes_left;
}

/**
 * hl_mmu_hr_get_pte() - increase PGT PTE count
 * @ctx: pointer to the context structure
 * @hr_func: host resident functions
 * @phys_hop_addr: HOP phys address
 */
void hl_mmu_hr_get_pte(struct hl_ctx *ctx, struct hl_hr_mmu_funcs *hr_func, u64 phys_hop_addr)
{
	hr_func->get_pgt_info(ctx, phys_hop_addr)->num_of_ptes++;
}
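/*
 * Note on the get/put pairing: each mapping that points into a HOP takes a
 * reference via hl_mmu_hr_get_pte(), and hl_mmu_hr_put_pte() drops it, freeing
 * the HOP through hl_mmu_hr_free_hop_remove_pgt() once its PTE count reaches
 * zero, so intermediate tables vanish when their last user is unmapped.
 */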
/**
 * hl_mmu_hr_get_next_hop_pgt_info() - get pgt_info structure for the next HOP
 * @ctx: pointer to the context structure.
 * @hr_func: host resident functions.
 * @curr_pte: current PTE value.
 *
 * @return pgt_info structure on success, otherwise NULL.
 */
struct pgt_info *hl_mmu_hr_get_next_hop_pgt_info(struct hl_ctx *ctx,
						struct hl_hr_mmu_funcs *hr_func,
						u64 curr_pte)
{
	u64 next_hop_phys_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);

	if (next_hop_phys_addr == ULLONG_MAX)
		return NULL;

	return hr_func->get_pgt_info(ctx, next_hop_phys_addr);
}
/**
 * hl_mmu_hr_alloc_hop() - allocate HOP
 * @ctx: pointer to the context structure.
 * @hr_priv: host resident private info structure.
 * @hr_func: host resident functions.
 * @mmu_prop: MMU properties.
 *
 * @return pgt_info structure associated with the allocated HOP on success, otherwise NULL.
 */
struct pgt_info *hl_mmu_hr_alloc_hop(struct hl_ctx *ctx, struct hl_mmu_hr_priv *hr_priv,
					struct hl_hr_mmu_funcs *hr_func,
					struct hl_mmu_properties *mmu_prop)
{
	struct hl_device *hdev = ctx->hdev;
	struct pgt_info *pgt_info;
	dma_addr_t phys_addr;
	void *virt_addr;
	int i, retry = 1;

	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
	if (!pgt_info)
		return NULL;

	for (i = 0; i <= retry; i++) {
		virt_addr = gen_pool_dma_zalloc_align(hr_priv->mmu_pgt_pool,
							mmu_prop->hop_table_size,
							&phys_addr,
							mmu_prop->hop_table_size);
		if (virt_addr)
			break;

		/* No memory in pool - get some and try again */
		virt_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &phys_addr,
							GFP_KERNEL | __GFP_ZERO);
		if (ZERO_OR_NULL_PTR(virt_addr))
			break;

		if (gen_pool_add_virt(hr_priv->mmu_pgt_pool, (unsigned long)virt_addr,
								phys_addr, SZ_2M, -1)) {
			hl_asic_dma_free_coherent(hdev, SZ_2M, virt_addr, phys_addr);
			virt_addr = NULL;
			break;
		}
	}

	if (ZERO_OR_NULL_PTR(virt_addr)) {
		dev_err(hdev->dev, "failed to allocate page\n");
		goto pool_alloc_err;
	}

	pgt_info->phys_addr = phys_addr;
	pgt_info->shadow_addr = (unsigned long) NULL;
	pgt_info->virt_addr = (unsigned long)virt_addr;
	pgt_info->ctx = ctx;
	pgt_info->num_of_ptes = 0;
	hr_func->add_pgt_info(ctx, pgt_info, phys_addr);

	return pgt_info;

pool_alloc_err:
	kfree(pgt_info);

	return NULL;
}
/**
 * hl_mmu_hr_get_alloc_next_hop() - get the next HOP, allocate it if it does not exist
 * @ctx: pointer to the context structure.
 * @hr_priv: host resident private info structure.
 * @hr_func: host resident functions.
 * @mmu_prop: MMU properties.
 * @curr_pte: current PTE value.
 * @is_new_hop: set to true if HOP is new (caller responsibility to set it to false).
 *
 * @return pgt_info structure associated with the allocated HOP on success, otherwise NULL.
 */
struct pgt_info *hl_mmu_hr_get_alloc_next_hop(struct hl_ctx *ctx,
						struct hl_mmu_hr_priv *hr_priv,
						struct hl_hr_mmu_funcs *hr_func,
						struct hl_mmu_properties *mmu_prop,
						u64 curr_pte, bool *is_new_hop)
{
	u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);

	if (hop_addr != ULLONG_MAX)
		return hr_func->get_pgt_info(ctx, hop_addr);

	*is_new_hop = true;

	return hl_mmu_hr_alloc_hop(ctx, hr_priv, hr_func, mmu_prop);
}
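/*
 * Usage sketch (caller behavior assumed from the is_new_hop contract above):
 * a page-table walker initializes is_new_hop to false before each call; when
 * it comes back true, the walker is expected to link the fresh HOP into its
 * parent by writing the new hop's address into the parent's PTE.
 */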
/**
 * hl_mmu_hr_get_tlb_info() - get the TLB info (info for a specific mapping)
 * @ctx: pointer to the context structure.
 * @virt_addr: the virt address for which to get info.
 * @hops: HOPs info structure.
 * @hr_func: host resident functions.
 *
 * @return 0 on success, otherwise non 0 error code.
 */
int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops,
						struct hl_hr_mmu_funcs *hr_func)
{
	/* using 6 HOPs as this is the maximum number of HOPs */
	struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
	struct hl_device *hdev = ctx->hdev;
	struct hl_mmu_properties *mmu_prop;
	int rc, i, used_hops;
	bool is_huge;

	rc = hr_func->get_tlb_mapping_params(hdev, &mmu_prop, hops, virt_addr, &is_huge);
	if (rc)
		return rc;

	used_hops = mmu_prop->num_hops;

	/* huge pages use one less hop */
	if (is_huge)
		used_hops--;

	hops->scrambled_vaddr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);

	for (i = 0 ; i < used_hops ; i++) {
		if (i == 0)
			hops_pgt_info[i] = hr_func->get_hop0_pgt_info(ctx);
		else
			hops_pgt_info[i] = hl_mmu_hr_get_next_hop_pgt_info(ctx, hr_func,
								hops->hop_info[i - 1].hop_pte_val);

		if (!hops_pgt_info[i])
			return -EFAULT;

		hops->hop_info[i].hop_addr = hops_pgt_info[i]->phys_addr;
		hops->hop_info[i].hop_pte_addr =
				hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
								hops->hop_info[i].hop_addr,
								hops->scrambled_vaddr);
		hops->hop_info[i].hop_pte_val = *(u64 *) (uintptr_t)
				hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
								hops->hop_info[i].hop_pte_addr,
								mmu_prop->hop_table_size);

		if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
			return -EFAULT;

		if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
			break;
	}

	/* if passed over all hops then no last hop was found */
	if (i == mmu_prop->num_hops)
		return -EFAULT;

	if (hops->scrambled_vaddr != virt_addr)
		hops->unscrambled_paddr = hdev->asic_funcs->descramble_addr
				(hdev, hops->hop_info[i].hop_pte_val);
	else
		hops->unscrambled_paddr = hops->hop_info[i].hop_pte_val;

	hops->used_hops = i + 1;

	return 0;
}