The PCI MMU (PMMU) cache has two layers. The upper layer, memcache,
operates on whole cache lines; the bottom layer does not.
Hence, after a PMMU map operation we must invalidate memcache, to avoid
the case where the new entry is already present in the cache because its
cache line was previously fetched in full.
The bottom layer, however, does not need to be invalidated, since it
holds no cache lines that could contain a stale entry. Skipping it is a
worthwhile optimization, as cache invalidation is time consuming.
Signed-off-by: Yuri Nudelman <ynudelman@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
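To illustrate, here is a minimal sketch (not the driver's actual
implementation) of how an ASIC-specific invalidation routine could honor
the new flag: the upper memcache is always flushed, while the costly
lower-cache flush is skipped when the caller marks it safe. The
my_asic_* helper names are hypothetical.

static int my_asic_invalidate_memcache(struct hl_device *hdev);
static int my_asic_invalidate_low_cache(struct hl_device *hdev, bool is_hard);

static int my_asic_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
					u32 flags)
{
	int rc;

	/*
	 * The upper layer (memcache) must always be invalidated after a
	 * PMMU map: the new entry may already sit in a fully cached line.
	 */
	rc = my_asic_invalidate_memcache(hdev);
	if (rc)
		return rc;

	/*
	 * The lower layer holds no cache lines, so a map operation cannot
	 * leave a stale entry there; callers may ask to skip its flush.
	 */
	if (flags & MMU_OP_SKIP_LOW_CACHE_INV)
		return 0;

	return my_asic_invalidate_low_cache(hdev, is_hard);
}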
offset += va_block->size;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false,
+ MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
mutex_unlock(&ctx->mmu_lock);
* enum mmu_op_flags - mmu operation relevant information.
* @MMU_OP_USERPTR: operation on user memory (host resident).
* @MMU_OP_PHYS_PACK: operation on DRAM (device resident).
+ * @MMU_OP_CLEAR_MEMCACHE: operation has to clear memcache.
+ * @MMU_OP_SKIP_LOW_CACHE_INV: operation is allowed to skip invalidation of the lower cache layer.
*/
enum mmu_op_flags {
MMU_OP_USERPTR = 0x1,
- MMU_OP_PHYS_PACK = 0x2
+ MMU_OP_PHYS_PACK = 0x2,
+ MMU_OP_CLEAR_MEMCACHE = 0x4,
+ MMU_OP_SKIP_LOW_CACHE_INV = 0x8,
};
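Since the flag values are distinct bits, an operation type and an
invalidation hint combine with a bitwise OR into a single mask, as the
hunks in this patch do; for example (caller sketch, mirroring the change
above):

	u32 flags = MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV;

	rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, flags);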
}
rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, false,
- *vm_type, ctx->asid, ret_vaddr, phys_pg_pack->total_size);
+ *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
+ ctx->asid, ret_vaddr, phys_pg_pack->total_size);
mutex_unlock(&ctx->mmu_lock);