Commit 2072ce03 authored by Dave Airlie

Merge branch 'drm-fixes-5.0' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

A few fixes for 5.0:
- Fix radeon crash on SI with VM passthrough
- Fencing fix for shared buffers
- Fix power hwmon reporting on APUs
- Powerplay fix for APUs
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190201043455.5988-1-alexander.deucher@amd.com
parents 8834f560 6e11ea9d
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 		effective_mode &= ~S_IWUSR;
 
 	if ((adev->flags & AMD_IS_APU) &&
-	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
 	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
 		return 0;
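
The hwmon change works through sysfs visibility: hwmon_attributes_visible() is the attribute group's .is_visible callback, and returning 0 there prevents the attribute from being created at all. The patch adds power1_average to the set of power attributes hidden on APUs, where this reading is not available. A minimal sketch of the mechanism; my_is_visible, my_attrs and the device_is_apu flag are illustrative stand-ins, not driver code:

	static umode_t my_is_visible(struct kobject *kobj,
				     struct attribute *attr, int index)
	{
		if (device_is_apu)		/* hypothetical condition */
			return 0;		/* attribute is not created */
		return attr->mode;		/* keep declared permissions */
	}

	static const struct attribute_group my_group = {
		.attrs      = my_attrs,		/* hypothetical list */
		.is_visible = my_is_visible,
	};
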
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -38,6 +38,7 @@
 #include "amdgpu_gem.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
 
 /**
  * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	return ERR_PTR(ret);
 }
 
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+	struct dma_fence **fences;
+	unsigned int count;
+	int r;
+
+	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+		return 0;
+
+	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	if (r)
+		return r;
+
+	if (count == 0) {
+		/* Now that was unexpected. */
+	} else if (count == 1) {
+		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_fence_put(fences[0]);
+		kfree(fences);
+	} else {
+		struct dma_fence_array *array;
+
+		array = dma_fence_array_create(count, fences,
+					       dma_fence_context_alloc(1), 0,
+					       false);
+		if (!array)
+			goto err_fences_put;
+
+		reservation_object_add_excl_fence(obj, &array->base);
+		dma_fence_put(&array->base);
+	}
+
+	return 0;
+
+err_fences_put:
+	while (count--)
+		dma_fence_put(fences[count]);
+	kfree(fences);
+	return -ENOMEM;
+}
+
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: Shared DMA buffer
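
A subtlety in __reservation_object_make_exclusive() above: on success, dma_fence_array_create() takes ownership of the fences array and of the references it contains, so the success paths must not free them; only when creation fails does ownership stay with the caller, which is what the err_fences_put label handles. The error path restated with comments:

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1), 0, false);
	if (!array) {
		/* creation failed: we still own each reference ... */
		while (count--)
			dma_fence_put(fences[count]);
		kfree(fences);		/* ... and the array allocation */
		return -ENOMEM;
	}
	/* success: the fence array now owns 'fences'; freeing it here
	 * would be a double free */
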
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
 
 	if (attach->dev->driver != adev->dev->driver) {
 		/*
-		 * Wait for all shared fences to complete before we switch to future
-		 * use of exclusive fence on this prime shared bo.
+		 * We only create shared fences for internal use, but importers
+		 * of the dmabuf rely on exclusive fences for implicitly
+		 * tracking write hazards. As any of the current fences may
+		 * correspond to a write, we need to convert all existing
+		 * fences on the reservation object into a single exclusive
+		 * fence.
 		 */
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-							true, false,
-							MAX_SCHEDULE_TIMEOUT);
-		if (unlikely(r < 0)) {
-			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		if (r)
 			goto error_unreserve;
-		}
 	}
 
 	/* pin buffer into GTT */
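
For context on why the conversion matters: an implicitly synchronizing importer typically waits only on the exclusive fence, so writes tracked as shared fences were invisible to it and could race with its reads. A hedged sketch of such an importer-side wait; resv is an illustrative pointer to the buffer's reservation object:

	long r;

	/* wait_all=false blocks on the exclusive fence only, which after
	 * this patch covers every outstanding write on the buffer */
	r = reservation_object_wait_timeout_rcu(resv, false /* wait_all */,
						true /* intr */,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;	/* interrupted or failed */
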
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
 		break;
 	case amd_pp_dpp_clock:
 		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
+		break;
 	default:
 		return -EINVAL;
 	}
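
The powerplay fix is a switch fall-through bug: amd_pp_dpp_clock assigned its clock/voltage table and then fell into default, so the lookup returned -EINVAL despite having valid data. Reduced to a sketch with the surrounding code omitted:

	switch (type) {
	case amd_pp_dpp_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
		/* without the break, control falls through ... */
	default:
		return -EINVAL;	/* ... and the valid request fails */
	}
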
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
 	u16 data_offset, size;
 	u8 frev, crev;
 	struct ci_power_info *pi;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
 		return -ENOMEM;
 	rdev->pm.dpm.priv = pi;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		pi->sys_pcie_mask = 0;
 	} else {
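
This is the radeon VM passthrough crash from the summary: a GPU passed through to a guest commonly sits directly on a root bus, where rdev->pdev->bus->self (the upstream bridge) is NULL, so pcie_get_speed_cap() dereferenced a NULL device. With speed_cap initialized to PCI_SPEED_UNKNOWN and the query guarded, the driver now falls back to the sys_pcie_mask = 0 path instead. The same guard is applied to si_dpm_init() below. The pattern in isolation, with pdev standing in for rdev->pdev:

	struct pci_dev *root = pdev->bus->self;	/* NULL on a root bus */
	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;

	if (!pci_is_root_bus(pdev->bus))
		speed_cap = pcie_get_speed_cap(root);	/* root valid here */
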
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
 	struct ni_power_info *ni_pi;
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		si_pi->sys_pcie_mask = 0;
 	} else {