Commit eb3f0517 authored by Pei Zhang, committed by Zhenyu Wang

drm/i915/gvt: refine function emulate_mmio_read/write

These two functions were written by multiple people across multiple patches.
'return' and 'goto err' are mixed within the same exit paths, which makes the
functions hard to follow. Unify the exits to use only 'goto', so that the gvt
lock is acquired in one place and released in one place.
Signed-off-by: Pei Zhang <pei.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 6ee942d5
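
The shape both functions converge on is the usual kernel single-exit locking
pattern: take the lock once, let every early bail-out jump to a label, and
unlock in exactly one place before returning. Below is a minimal, self-contained
sketch of that pattern; the names are hypothetical and a userspace pthread mutex
stands in for gvt->lock, so it illustrates the structure only, not the actual
gvt code.

/*
 * Minimal sketch of the single-exit locking pattern applied by this patch.
 * Hypothetical names; a userspace pthread mutex stands in for gvt->lock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static int emulate_read(unsigned int offset, unsigned int bytes)
{
        int ret = -EINVAL;

        pthread_mutex_lock(&dev_lock);          /* lock taken in one place */

        if (bytes == 0 || bytes > 8)            /* invalid size: report and bail */
                goto err;

        if (offset & 3) {                       /* unaligned: bail without a message */
                ret = -EFAULT;
                goto out;
        }

        /* ... real emulation work would happen here ... */
        ret = 0;
        goto out;

err:
        fprintf(stderr, "fail to emulate read %08x len %u\n", offset, bytes);
out:
        pthread_mutex_unlock(&dev_lock);        /* lock released in one place */
        return ret;
}

int main(void)
{
        printf("aligned read:   %d\n", emulate_read(0x100, 4));
        printf("bad length:     %d\n", emulate_read(0x100, 16));
        printf("unaligned read: %d\n", emulate_read(0x101, 4));
        return 0;
}

Note that the err label only adds the error message and then falls through to
out, so the success and failure paths share the same unlock and return.
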
@@ -157,7 +157,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
         unsigned int offset = 0;
         int ret = -EINVAL;
 
-
         if (vgpu->failsafe) {
                 failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
                 return 0;
@@ -166,8 +165,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 
         if (vgpu_gpa_is_aperture(vgpu, pa)) {
                 ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
-                mutex_unlock(&gvt->lock);
-                return ret;
+                goto out;
         }
 
         if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
@@ -183,8 +181,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                                         ret, t->gfn, pa, *(u32 *)p_data,
                                         bytes);
                         }
-                        mutex_unlock(&gvt->lock);
-                        return ret;
+                        goto out;
                 }
         }
 
@@ -205,14 +202,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                                 p_data, bytes);
                 if (ret)
                         goto err;
-                mutex_unlock(&gvt->lock);
-                return ret;
+                goto out;
         }
 
         if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
                 ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
-                mutex_unlock(&gvt->lock);
-                return ret;
+                goto out;
         }
 
         if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
@@ -228,11 +223,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                 goto err;
 
         intel_gvt_mmio_set_accessed(gvt, offset);
-        mutex_unlock(&gvt->lock);
-        return 0;
+        ret = 0;
+        goto out;
+
 err:
         gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
                         offset, bytes);
+out:
         mutex_unlock(&gvt->lock);
         return ret;
 }
@@ -263,8 +260,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 
         if (vgpu_gpa_is_aperture(vgpu, pa)) {
                 ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
-                mutex_unlock(&gvt->lock);
-                return ret;
+                goto out;
         }
 
         if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
@@ -280,8 +276,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                                         ret, t->gfn, pa,
                                         *(u32 *)p_data, bytes);
                         }
-                        mutex_unlock(&gvt->lock);
-                        return ret;
+                        goto out;
                 }
         }
 
@@ -302,14 +297,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                                 p_data, bytes);
                 if (ret)
                         goto err;
-                mutex_unlock(&gvt->lock);
-                return ret;
+                goto out;
         }
 
         if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
                 ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
-                mutex_unlock(&gvt->lock);
-                return ret;
+                goto out;
         }
 
         ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
@@ -317,11 +310,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                 goto err;
 
         intel_gvt_mmio_set_accessed(gvt, offset);
-        mutex_unlock(&gvt->lock);
-        return 0;
+        ret = 0;
+        goto out;
 err:
         gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
                         bytes);
+out:
         mutex_unlock(&gvt->lock);
         return ret;
 }