unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg(mm, tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
}
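Context for the rename: the patch switches callers from iommu_tce_xchg() to iommu_tce_xchg_no_kill(), which updates a TCE but deliberately skips the hardware TCE-cache invalidation so that callers can batch the flush with iommu_tce_kill(). A minimal sketch of the virtual-mode pair, inferred from the real-mode versions added later in this patch; the exact bodies here are an assumption, not part of this diff:

	long iommu_tce_xchg_no_kill(struct mm_struct *mm, struct iommu_table *tbl,
			unsigned long entry, unsigned long *hpa,
			enum dma_data_direction *direction)
	{
		/* Exchange the TCE but skip the TCE-cache flush (realmode = false). */
		return tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
	}

	void iommu_tce_kill(struct iommu_table *tbl,
			unsigned long entry, unsigned long pages)
	{
		/* Flush the TCE cache for 'pages' entries starting at 'entry'. */
		if (tbl->it_ops->tce_kill)
			tbl->it_ops->tce_kill(tbl, entry, pages, false);
	}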
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
unsigned long hpa = 0;
long ret;
- if (WARN_ON_ONCE(iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir)))
+ if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
+ &dir)))
return H_TOO_HARD;
if (dir == DMA_NONE)
ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
if (ret != H_SUCCESS)
- iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
return ret;
}
if (mm_iommu_mapped_inc(mem))
return H_TOO_HARD;
- ret = iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir);
+ ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
if (WARN_ON_ONCE(ret)) {
mm_iommu_mapped_dec(mem);
return H_TOO_HARD;
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
entry, ua, dir);
+ iommu_tce_kill(stit->tbl, entry, 1);
+
if (ret != H_SUCCESS) {
kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
goto unlock_exit;
*/
if (get_user(tce, tces + i)) {
ret = H_TOO_HARD;
- goto unlock_exit;
+ goto invalidate_exit;
}
tce = be64_to_cpu(tce);
if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
- goto unlock_exit;
+ goto invalidate_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
if (ret != H_SUCCESS) {
kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
entry);
- goto unlock_exit;
+ goto invalidate_exit;
}
}
kvmppc_tce_put(stt, entry + i, tce);
}
+invalidate_exit:
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+ iommu_tce_kill(stit->tbl, entry, npages);
+
unlock_exit:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
continue;
if (ret == H_TOO_HARD)
- return ret;
+ goto invalidate_exit;
WARN_ON_ONCE(1);
kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
- for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
- kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
+ for (i = 0; i < npages; ++i)
+ kvmppc_tce_put(stt, (ioba >> stt->page_shift) + i, tce_value);
- return H_SUCCESS;
+invalidate_exit:
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+ iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
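The invalidate_exit pattern above is the core of the change: each hypercall now updates all affected TCEs with the _no_kill variant and issues a single range invalidation per attached hardware table on the way out, on both the success and the H_TOO_HARD paths (over-invalidation is harmless). A condensed, hypothetical caller showing the shape of the pattern:

	/* Hypothetical condensed caller; the real code also handles
	 * per-entry errors and SRCU locking. */
	for (i = 0; i < npages; ++i)
		iommu_tce_xchg_no_kill(mm, tbl, entry + i, &hpa, &dir);
	iommu_tce_kill(tbl, entry, npages);	/* one flush for the whole range */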
return H_SUCCESS;
}
-static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
+static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
+ struct iommu_table *tbl,
unsigned long entry, unsigned long *hpa,
enum dma_data_direction *direction)
{
long ret;
- ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+ ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);
if (!ret && ((*direction == DMA_FROM_DEVICE) ||
(*direction == DMA_BIDIRECTIONAL))) {
return ret;
}
+static void iommu_tce_kill_rm(struct iommu_table *tbl,
+ unsigned long entry, unsigned long pages)
+{
+ if (tbl->it_ops->tce_kill)
+ tbl->it_ops->tce_kill(tbl, entry, pages, true);
+}
+
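Both wrappers dispatch through struct iommu_table_ops, with the final bool selecting the real-mode flavour of the platform hook. A sketch of the two callbacks as this patch assumes them (surrounding fields elided):

	struct iommu_table_ops {
		/* ... */
		/* Exchange a TCE without flushing the TCE cache. */
		int (*xchg_no_kill)(struct iommu_table *tbl, long index,
				unsigned long *hpa,
				enum dma_data_direction *direction,
				bool realmode);
		/* Invalidate 'pages' cached TCEs starting at 'index'. */
		void (*tce_kill)(struct iommu_table *tbl, unsigned long index,
				unsigned long pages, bool realmode);
		/* ... */
	};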
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
unsigned long entry)
{
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
}
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
unsigned long hpa = 0;
long ret;
- if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
+ if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
/*
* real mode xchg can fail if struct page crosses
* a page boundary
*/
return H_TOO_HARD;
ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
if (ret)
- iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
return ret;
}
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
return H_TOO_HARD;
- ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
+ ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
if (ret) {
mm_iommu_mapped_dec(mem);
/*
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry, ua, dir);
+ iommu_tce_kill_rm(stit->tbl, entry, 1);
+
if (ret != H_SUCCESS) {
kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
return ret;
ua = 0;
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
ret = H_PARAMETER;
- goto unlock_exit;
+ goto invalidate_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
if (ret != H_SUCCESS) {
kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
entry);
- goto unlock_exit;
+ goto invalidate_exit;
}
}
kvmppc_rm_tce_put(stt, entry + i, tce);
}
+invalidate_exit:
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+ iommu_tce_kill_rm(stit->tbl, entry, npages);
+
unlock_exit:
if (rmap)
unlock_rmap(rmap);
continue;
if (ret == H_TOO_HARD)
- return ret;
+ goto invalidate_exit;
WARN_ON_ONCE_RM(1);
kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
- for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
- kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
+ for (i = 0; i < npages; ++i)
+ kvmppc_rm_tce_put(stt, (ioba >> stt->page_shift) + i, tce_value);
- return H_SUCCESS;
+invalidate_exit:
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+ iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
+
+ return ret;
}
/* This can be called in either virtual mode or real mode */
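A platform backend is expected to wire both hooks into its ops table so the same KVM code works in either context. A sketch of such a registration; the pnv_ioda2_* names are illustrative, not taken from this diff:

	static struct iommu_table_ops pnv_ioda2_iommu_ops = {
		/* ... */
		.xchg_no_kill	= pnv_ioda2_tce_xchg_no_kill,
		.tce_kill	= pnv_ioda2_tce_kill,
		/* ... */
	};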