 		return ret;
 	}
 	pdd->drm_priv = drm_file->private_data;
+	pdd->tlb_seq = 0;
 	ret = kfd_process_device_reserve_ib_mem(pdd);
 	if (ret)
 void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
 {
+	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
+	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
 	struct kfd_dev *dev = pdd->dev;
 
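+	/* pdd->tlb_seq caches the VM TLB sequence number from the last flush.
+	 * If the VM has not advanced it since then, the TLB is already up to
+	 * date and this flush can be skipped.
+	 */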
+	if (pdd->tlb_seq == tlb_seq)
+		return;
+
+	pdd->tlb_seq = tlb_seq;
+
 	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
 		/* Nothing to flush until a VMID is assigned, which
 		 * only happens when the first queue is created.