Commit fecca689 authored by Linus Torvalds

Merge tag 'drm-fixes-2020-04-18' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Quiet enough for rc2, mostly amdgpu fixes, a couple of i915 fixes, and
  one nouveau module firmware fix:

  i915:
   - Fix guest page access by using the brand new VFIO dma r/w interface (Yan)
   - Fix for i915 perf read buffers (Ashutosh)

  amdgpu:
   - gfx10 fix
   - SMU7 overclocking fix
   - RAS fix
   - GPU reset fix
   - Fix a regression in a previous suspend/resume fix
   - Add a gfxoff quirk

  nouveau:
   - fix missing MODULE_FIRMWARE"

* tag 'drm-fixes-2020-04-18' of git://anongit.freedesktop.org/drm/drm:
  drm/nouveau/sec2/gv100-: add missing MODULE_FIRMWARE()
  drm/amdgpu/gfx9: add gfxoff quirk
  drm/amdgpu: fix the hw hang during perform system reboot and reset
  drm/i915/gvt: switch to user vfio_group_pin/upin_pages
  drm/i915/gvt: subsitute kvm_read/write_guest with vfio_dma_rw
  drm/i915/gvt: hold reference of VFIO group during opening of vgpu
  drm/i915/perf: Do not clear pollin for small user read buffers
  drm/amdgpu: fix wrong vram lost counter increment V2
  drm/amd/powerplay: unload mp1 for Arcturus RAS baco reset
  drm/amd/powerplay: force the trim of the mclk dpm_levels if OD is enabled
  Revert "drm/amdgpu: change SH MEM alignment mode for gfx10"
parents 90280eaa 4da858c0
@@ -2008,8 +2008,24 @@ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
  */
 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
 {
-	return !!memcmp(adev->gart.ptr, adev->reset_magic,
-			AMDGPU_RESET_MAGIC_NUM);
+	if (memcmp(adev->gart.ptr, adev->reset_magic,
+			AMDGPU_RESET_MAGIC_NUM))
+		return true;
+
+	if (!adev->in_gpu_reset)
+		return false;
+
+	/*
+	 * For all ASICs with baco/mode1 reset, the VRAM is
+	 * always assumed to be lost.
+	 */
+	switch (amdgpu_asic_reset_method(adev)) {
+	case AMD_RESET_METHOD_BACO:
+	case AMD_RESET_METHOD_MODE1:
+		return true;
+	default:
+		return false;
+	}
 }
 
 /**
@@ -2340,6 +2356,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.valid)
......
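With the VRAM-lost check above now reporting loss on its own for BACO and mode1 resets, the per-ASIC amdgpu_inc_vram_lost() calls removed in the hunks below become redundant. A minimal sketch of the intended pattern, assuming a hypothetical caller in the common reset path (the function name here is an assumption, not the driver's actual code):

static void example_post_reset_accounting(struct amdgpu_device *adev)
{
	/* one common place decides whether VRAM contents survived the reset */
	if (amdgpu_device_check_vram_lost(adev))
		amdgpu_inc_vram_lost(adev);	/* bump the per-device counter once */
}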
@@ -1358,8 +1358,6 @@ static int cik_asic_reset(struct amdgpu_device *adev)
 	int r;
 
 	if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
-		if (!adev->in_suspend)
-			amdgpu_inc_vram_lost(adev);
 		r = amdgpu_dpm_baco_reset(adev);
 	} else {
 		r = cik_asic_pci_config_reset(adev);
......
@@ -279,7 +279,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
 #define DEFAULT_SH_MEM_CONFIG \
 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
-	 (SH_MEM_ALIGNMENT_MODE_DWORD << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
+	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
 	 (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
 	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
......
@@ -1234,6 +1234,8 @@ struct amdgpu_gfxoff_quirk {
 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
 	/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
+	{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
 	{ 0, 0, 0, 0, 0 },
 };
......
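The quirk table above is a sentinel-terminated list matched against a board's PCI vendor/device, subsystem IDs, and revision; the new entry keeps gfxoff disabled on the Raven board reported in bug 207171. A rough sketch of how such a table is typically walked (struct and helper names below are assumptions for illustration, using kernel types from <linux/types.h>):

struct gfxoff_quirk_example {
	u16 chip_vendor, chip_device;
	u16 subsys_vendor, subsys_device;
	u8  revision;
};

static bool example_needs_gfxoff_quirk(const struct gfxoff_quirk_example *q,
				       u16 vid, u16 did, u16 svid, u16 sdid, u8 rev)
{
	for (; q->chip_device; q++) {	/* { 0, 0, 0, 0, 0 } terminates the list */
		if (q->chip_vendor == vid && q->chip_device == did &&
		    q->subsys_vendor == svid && q->subsys_device == sdid &&
		    q->revision == rev)
			return true;	/* matching boards keep gfxoff disabled */
	}
	return false;
}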
@@ -351,8 +351,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
 	struct smu_context *smu = &adev->smu;
 
 	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
-		if (!adev->in_suspend)
-			amdgpu_inc_vram_lost(adev);
 		ret = smu_baco_enter(smu);
 		if (ret)
 			return ret;
@@ -360,8 +358,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
 		if (ret)
 			return ret;
 	} else {
-		if (!adev->in_suspend)
-			amdgpu_inc_vram_lost(adev);
 		ret = nv_asic_mode1_reset(adev);
 	}
......
@@ -569,14 +569,10 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 
 	switch (soc15_asic_reset_method(adev)) {
 	case AMD_RESET_METHOD_BACO:
-		if (!adev->in_suspend)
-			amdgpu_inc_vram_lost(adev);
 		return soc15_asic_baco_reset(adev);
 	case AMD_RESET_METHOD_MODE2:
 		return amdgpu_dpm_mode2_reset(adev);
 	default:
-		if (!adev->in_suspend)
-			amdgpu_inc_vram_lost(adev);
 		return soc15_asic_mode1_reset(adev);
 	}
 }
......
@@ -765,8 +765,6 @@ static int vi_asic_reset(struct amdgpu_device *adev)
 	int r;
 
 	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
-		if (!adev->in_suspend)
-			amdgpu_inc_vram_lost(adev);
 		r = amdgpu_dpm_baco_reset(adev);
 	} else {
 		r = vi_asic_pci_config_reset(adev);
......
@@ -3804,9 +3804,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
 {
 	uint32_t i;
 
+	/* force the trim if mclk_switching is disabled to prevent flicker */
+	bool force_trim = (low_limit == high_limit);
 	for (i = 0; i < dpm_table->count; i++) {
 	/*skip the trim if od is enabled*/
-		if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+		if ((!hwmgr->od_enabled || force_trim)
+			&& (dpm_table->dpm_levels[i].value < low_limit
 			|| dpm_table->dpm_levels[i].value > high_limit))
 			dpm_table->dpm_levels[i].enabled = false;
 		else
......
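The new force_trim flag covers the case where low_limit equals high_limit, i.e. mclk switching is disabled and only a single memory level should stay enabled; before this change, an overclocked (OD) configuration skipped the trim entirely and could leave extra levels enabled, causing flicker. A small restatement of the new condition, for illustration only (not driver code, kernel u32 type assumed):

static bool example_should_disable_level(bool od_enabled, bool force_trim,
					 u32 value, u32 low, u32 high)
{
	/* trim when OD is off, or unconditionally when low == high (force) */
	return (!od_enabled || force_trim) && (value < low || value > high);
}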
@@ -1718,6 +1718,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
 		if (ret)
 			goto out;
 
+		if (ras && ras->supported) {
+			ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+			if (ret)
+				goto out;
+		}
+
 		/* clear vbios scratch 6 and 7 for coming asic reinit */
 		WREG32(adev->bios_scratch_reg_offset + 6, 0);
 		WREG32(adev->bios_scratch_reg_offset + 7, 0);
......
@@ -131,6 +131,7 @@ struct kvmgt_vdev {
 	struct work_struct release_work;
 	atomic_t released;
 	struct vfio_device *vfio_device;
+	struct vfio_group *vfio_group;
 };
 
 static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
@@ -151,6 +152,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		unsigned long size)
 {
 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 	int total_pages;
 	int npage;
 	int ret;
@@ -160,7 +162,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 	for (npage = 0; npage < total_pages; npage++) {
 		unsigned long cur_gfn = gfn + npage;
 
-		ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
+		ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
 		drm_WARN_ON(&i915->drm, ret != 1);
 	}
 }
@@ -169,6 +171,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		unsigned long size, struct page **page)
 {
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 	unsigned long base_pfn = 0;
 	int total_pages;
 	int npage;
@@ -183,7 +186,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		unsigned long cur_gfn = gfn + npage;
 		unsigned long pfn;
 
-		ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
+		ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
 				     IOMMU_READ | IOMMU_WRITE, &pfn);
 		if (ret != 1) {
 			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
@@ -792,6 +795,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 	unsigned long events;
 	int ret;
+	struct vfio_group *vfio_group;
 
 	vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
 	vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
@@ -814,6 +818,14 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 		goto undo_iommu;
 	}
 
+	vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
+	if (IS_ERR_OR_NULL(vfio_group)) {
+		ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
+		gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
+		goto undo_register;
+	}
+	vdev->vfio_group = vfio_group;
+
 	/* Take a module reference as mdev core doesn't take
 	 * a reference for vendor driver.
 	 */
@@ -830,6 +842,10 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 	return ret;
 
 undo_group:
+	vfio_group_put_external_user(vdev->vfio_group);
+	vdev->vfio_group = NULL;
+
+undo_register:
 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
 					&vdev->group_notifier);
@@ -884,6 +900,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 	kvmgt_guest_exit(info);
 
 	intel_vgpu_release_msi_eventfd_ctx(vgpu);
+	vfio_group_put_external_user(vdev->vfio_group);
 
 	vdev->kvm = NULL;
 	vgpu->handle = 0;
@@ -2035,33 +2052,14 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
 			void *buf, unsigned long len, bool write)
 {
 	struct kvmgt_guest_info *info;
-	struct kvm *kvm;
-	int idx, ret;
-	bool kthread = current->mm == NULL;
 
 	if (!handle_valid(handle))
 		return -ESRCH;
 
 	info = (struct kvmgt_guest_info *)handle;
-	kvm = info->kvm;
-
-	if (kthread) {
-		if (!mmget_not_zero(kvm->mm))
-			return -EFAULT;
-		use_mm(kvm->mm);
-	}
-
-	idx = srcu_read_lock(&kvm->srcu);
-	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
-		      kvm_read_guest(kvm, gpa, buf, len);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	if (kthread) {
-		unuse_mm(kvm->mm);
-		mmput(kvm->mm);
-	}
 
-	return ret;
+	return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
+			gpa, buf, len, write);
 }
 
 static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
......
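The three kvmgt changes fit together: the vGPU now holds a vfio_group reference from open time, and guest memory is accessed through vfio_dma_rw() against that group's IOMMU mappings instead of borrowing the QEMU process mm with use_mm() and kvm_read/write_guest(). A minimal sketch of the new access pattern (the helper name is an assumption; vfio_dma_rw() is the interface used in the hunk above):

/* Illustrative only: read guest memory through the held VFIO group. */
static int example_read_guest(struct vfio_group *group, unsigned long gpa,
			      void *buf, size_t len)
{
	/* works from kernel threads too, since no guest mm needs to be adopted */
	return vfio_dma_rw(group, gpa, buf, len, false /* write = false */);
}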
@@ -2940,49 +2940,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
 		gen8_update_reg_state_unlocked(ce, stream);
 }
 
-/**
- * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
- * @stream: An i915 perf stream
- * @file: An i915 perf stream file
- * @buf: destination buffer given by userspace
- * @count: the number of bytes userspace wants to read
- * @ppos: (inout) file seek position (unused)
- *
- * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
- * ensure that if we've successfully copied any data then reporting that takes
- * precedence over any internal error status, so the data isn't lost.
- *
- * For example ret will be -ENOSPC whenever there is more buffered data than
- * can be copied to userspace, but that's only interesting if we weren't able
- * to copy some data because it implies the userspace buffer is too small to
- * receive a single record (and we never split records).
- *
- * Another case with ret == -EFAULT is more of a grey area since it would seem
- * like bad form for userspace to ask us to overrun its buffer, but the user
- * knows best:
- *
- *   http://yarchive.net/comp/linux/partial_reads_writes.html
- *
- * Returns: The number of bytes copied or a negative error code on failure.
- */
-static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
-				     struct file *file,
-				     char __user *buf,
-				     size_t count,
-				     loff_t *ppos)
-{
-	/* Note we keep the offset (aka bytes read) separate from any
-	 * error status so that the final check for whether we return
-	 * the bytes read with a higher precedence than any error (see
-	 * comment below) doesn't need to be handled/duplicated in
-	 * stream->ops->read() implementations.
-	 */
-	size_t offset = 0;
-	int ret = stream->ops->read(stream, buf, count, &offset);
-
-	return offset ?: (ret ?: -EAGAIN);
-}
-
 /**
  * i915_perf_read - handles read() FOP for i915 perf stream FDs
  * @file: An i915 perf stream file
@@ -3008,7 +2965,8 @@ static ssize_t i915_perf_read(struct file *file,
 {
 	struct i915_perf_stream *stream = file->private_data;
 	struct i915_perf *perf = stream->perf;
-	ssize_t ret;
+	size_t offset = 0;
+	int ret;
 
 	/* To ensure it's handled consistently we simply treat all reads of a
 	 * disabled stream as an error. In particular it might otherwise lead
@@ -3031,13 +2989,12 @@ static ssize_t i915_perf_read(struct file *file,
 				return ret;
 
 			mutex_lock(&perf->lock);
-			ret = i915_perf_read_locked(stream, file,
-						    buf, count, ppos);
+			ret = stream->ops->read(stream, buf, count, &offset);
 			mutex_unlock(&perf->lock);
-		} while (ret == -EAGAIN);
+		} while (!offset && !ret);
 	} else {
 		mutex_lock(&perf->lock);
-		ret = i915_perf_read_locked(stream, file, buf, count, ppos);
+		ret = stream->ops->read(stream, buf, count, &offset);
 		mutex_unlock(&perf->lock);
 	}
@@ -3048,15 +3005,15 @@ static ssize_t i915_perf_read(struct file *file,
 	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
 	 * effectively ensures we back off until the next hrtimer callback
 	 * before reporting another EPOLLIN event.
+	 * The exception to this is if ops->read() returned -ENOSPC which means
+	 * that more OA data is available than could fit in the user provided
+	 * buffer. In this case we want the next poll() call to not block.
 	 */
-	if (ret >= 0 || ret == -EAGAIN) {
-		/* Maybe make ->pollin per-stream state if we support multiple
-		 * concurrent streams in the future.
-		 */
+	if (ret != -ENOSPC)
 		stream->pollin = false;
-	}
 
-	return ret;
+	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
+	return offset ?: (ret ?: -EAGAIN);
 }
 
 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
......
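The rewritten i915_perf_read() keeps the byte count (offset) separate from the error status and applies the precedence rule directly at the end: data already copied wins over any error, and a read that produced neither data nor an error reports -EAGAIN. The GNU "?:" shorthand used there behaves as in this small sketch (illustrative only):

/* Illustrative only: "a ?: b" yields a when it is non-zero, else b. */
static ssize_t example_read_result(size_t offset, int ret)
{
	if (offset)
		return offset;		/* bytes copied take precedence */
	return ret ? ret : -EAGAIN;	/* otherwise the error, or -EAGAIN */
}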
@@ -25,6 +25,9 @@
 MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
 
 static const struct nvkm_sec2_fwif
 gp108_sec2_fwif[] = {
......
@@ -56,6 +56,22 @@ tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
 	return 0;
 }
 
+MODULE_FIRMWARE("nvidia/tu102/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu102/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/sig.bin");
+
 static const struct nvkm_sec2_fwif
 tu102_sec2_fwif[] = {
 	{ 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
......