Commit 2691f0ff authored by Alexey Kardashevskiy, committed by Michael Ellerman

KVM: PPC: Propagate errors to the guest when failed instead of ignoring

At the moment, if the PUT_TCE{_INDIRECT} handlers fail to update
the hardware tables, we print a warning once, clear the entry and
continue. This was done because, at the time, the assumption was that if
a VFIO device is hotplugged into the guest and userspace replays the
virtual DMA mappings (i.e. TCEs) into the hardware tables, and this fails,
there is nothing useful we can do about it.

However, that assumption is not valid: these handlers are not called for
TCE replay (the VFIO ioctl interface is used for that), they only handle
new TCEs.

This returns an error to the guest when a request cannot be processed.
At this point, the only possible failure is H_TOO_HARD.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 42de7b9e
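
For readers who want the shape of the change without reading the diff, here is a minimal, self-contained sketch in plain C of the new error handling. The helper names (map_one_entry, clear_entry, put_tce_handler) and the simplified return codes are illustrative assumptions, not the kernel's actual functions or values; the point is only that a failed mapping now clears the entry and propagates the error (e.g. H_TOO_HARD) to the caller instead of warning once and carrying on.

/* Sketch only: simplified return codes, not the kernel's definitions. */
#include <stdio.h>

#define H_SUCCESS	0L
#define H_TOO_HARD	(-1L)	/* illustrative value, not the real one */

/* Hypothetical stand-in for the per-table IOMMU map operation. */
static long map_one_entry(unsigned long entry, int simulate_failure)
{
	return simulate_failure ? H_TOO_HARD : H_SUCCESS;
}

/* Hypothetical stand-in for clearing a TCE entry. */
static void clear_entry(unsigned long entry)
{
	printf("cleared entry %lu\n", entry);
}

/*
 * New behaviour: on the first failure, clear the entry and return the
 * error so it reaches the caller (the guest, as the hcall result),
 * rather than warning once and continuing as before.
 */
static long put_tce_handler(unsigned long entry, int simulate_failure)
{
	long ret = map_one_entry(entry, simulate_failure);

	if (ret != H_SUCCESS) {
		clear_entry(entry);
		return ret;
	}
	return H_SUCCESS;
}

int main(void)
{
	printf("success case: %ld\n", put_tce_handler(1, 0));
	printf("failure case: %ld\n", put_tce_handler(2, 1));
	return 0;
}
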
arch/powerpc/kvm/book3s_64_vio.c

@@ -568,14 +568,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
 				entry, ua, dir);
 
-		if (ret == H_SUCCESS)
-			continue;
-
-		if (ret == H_TOO_HARD)
-			goto unlock_exit;
-
-		WARN_ON_ONCE(1);
-		kvmppc_clear_tce(stit->tbl, entry);
+		if (ret != H_SUCCESS) {
+			kvmppc_clear_tce(stit->tbl, entry);
+			goto unlock_exit;
+		}
 	}
 
 	kvmppc_tce_put(stt, entry, tce);
@@ -663,14 +659,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 				stit->tbl, entry + i, ua,
 				iommu_tce_direction(tce));
 
-			if (ret == H_SUCCESS)
-				continue;
-
-			if (ret == H_TOO_HARD)
-				goto unlock_exit;
-
-			WARN_ON_ONCE(1);
-			kvmppc_clear_tce(stit->tbl, entry);
+			if (ret != H_SUCCESS) {
+				kvmppc_clear_tce(stit->tbl, entry);
+				goto unlock_exit;
+			}
 		}
 
 		kvmppc_tce_put(stt, entry + i, tce);

arch/powerpc/kvm/book3s_64_vio_hv.c

@@ -405,14 +405,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
 				stit->tbl, entry, ua, dir);
 
-		if (ret == H_SUCCESS)
-			continue;
-
-		if (ret == H_TOO_HARD)
-			return ret;
-
-		WARN_ON_ONCE_RM(1);
-		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+		if (ret != H_SUCCESS) {
+			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+			return ret;
+		}
 	}
 
 	kvmppc_tce_put(stt, entry, tce);
@@ -558,14 +554,11 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 				stit->tbl, entry + i, ua,
 				iommu_tce_direction(tce));
 
-			if (ret == H_SUCCESS)
-				continue;
-
-			if (ret == H_TOO_HARD)
-				goto unlock_exit;
-
-			WARN_ON_ONCE_RM(1);
-			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+			if (ret != H_SUCCESS) {
+				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
+						entry);
+				goto unlock_exit;
+			}
 		}
 
 		kvmppc_tce_put(stt, entry + i, tce);