Commit 88dfc9a3 authored by Likun Gao, committed by Alex Deucher

drm/amdgpu: separate amdgpu_rlc into a single file

Separate the RLC functions and structs out of the GFX file into their own file.
Abstract the common amdgpu_gfx_rlc_fini() helper out of the per-ASIC implementations.
Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent fdb81fd7
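The net effect on callers, as a minimal before/after sketch condensed from the gfx_v6_0/v7_0/v8_0/v9_0 hunks below (surrounding code and error handling omitted):

        /* before: teardown went through a per-ASIC RLC callback */
        adev->gfx.rlc.funcs->fini(adev);

        /* after: all ASICs call the shared helper now living in amdgpu_rlc.c */
        amdgpu_gfx_rlc_fini(adev);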
@@ -105,6 +105,7 @@ amdgpu-y += \
 # add GFX block
 amdgpu-y += \
         amdgpu_gfx.o \
+        amdgpu_rlc.o \
         gfx_v8_0.o \
         gfx_v9_0.o
...
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
 /* delay 0.1 second to enable gfx off feature */
 #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
...
@@ -29,6 +29,7 @@
  */
 #include "clearstate_defs.h"
 #include "amdgpu_ring.h"
+#include "amdgpu_rlc.h"
 /* GFX current status */
 #define AMDGPU_GFX_NORMAL_MODE 0x00000000L
@@ -37,65 +38,6 @@
 #define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
 #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
-struct amdgpu_rlc_funcs {
-        void (*enter_safe_mode)(struct amdgpu_device *adev);
-        void (*exit_safe_mode)(struct amdgpu_device *adev);
-        int (*init)(struct amdgpu_device *adev);
-        void (*fini)(struct amdgpu_device *adev);
-        int (*resume)(struct amdgpu_device *adev);
-        void (*stop)(struct amdgpu_device *adev);
-        void (*reset)(struct amdgpu_device *adev);
-        void (*start)(struct amdgpu_device *adev);
-};
-struct amdgpu_rlc {
-        /* for power gating */
-        struct amdgpu_bo *save_restore_obj;
-        uint64_t save_restore_gpu_addr;
-        volatile uint32_t *sr_ptr;
-        const u32 *reg_list;
-        u32 reg_list_size;
-        /* for clear state */
-        struct amdgpu_bo *clear_state_obj;
-        uint64_t clear_state_gpu_addr;
-        volatile uint32_t *cs_ptr;
-        const struct cs_section_def *cs_data;
-        u32 clear_state_size;
-        /* for cp tables */
-        struct amdgpu_bo *cp_table_obj;
-        uint64_t cp_table_gpu_addr;
-        volatile uint32_t *cp_table_ptr;
-        u32 cp_table_size;
-        /* safe mode for updating CG/PG state */
-        bool in_safe_mode;
-        const struct amdgpu_rlc_funcs *funcs;
-        /* for firmware data */
-        u32 save_and_restore_offset;
-        u32 clear_state_descriptor_offset;
-        u32 avail_scratch_ram_locations;
-        u32 reg_restore_list_size;
-        u32 reg_list_format_start;
-        u32 reg_list_format_separate_start;
-        u32 starting_offsets_start;
-        u32 reg_list_format_size_bytes;
-        u32 reg_list_size_bytes;
-        u32 reg_list_format_direct_reg_list_length;
-        u32 save_restore_list_cntl_size_bytes;
-        u32 save_restore_list_gpm_size_bytes;
-        u32 save_restore_list_srm_size_bytes;
-        u32 *register_list_format;
-        u32 *register_restore;
-        u8 *save_restore_list_cntl;
-        u8 *save_restore_list_gpm;
-        u8 *save_restore_list_srm;
-        bool is_rlc_v2_1;
-};
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
 struct amdgpu_mec {
...
/*
* Copyright 2014 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
/**
 * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
 *
 * @adev: amdgpu_device pointer
 *
 * Free the three BOs used for the rlc_save_restore_block, rlc_clear_state_block
 * and rlc_jump_table_block.
 */
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
{
        /* save restore block */
        if (adev->gfx.rlc.save_restore_obj) {
                amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
                                      &adev->gfx.rlc.save_restore_gpu_addr,
                                      (void **)&adev->gfx.rlc.sr_ptr);
        }

        /* clear state block */
        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                              &adev->gfx.rlc.clear_state_gpu_addr,
                              (void **)&adev->gfx.rlc.cs_ptr);

        /* jump table block */
        amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
                              &adev->gfx.rlc.cp_table_gpu_addr,
                              (void **)&adev->gfx.rlc.cp_table_ptr);
}
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __AMDGPU_RLC_H__
#define __AMDGPU_RLC_H__
#include "clearstate_defs.h"
struct amdgpu_rlc_funcs {
        void (*enter_safe_mode)(struct amdgpu_device *adev);
        void (*exit_safe_mode)(struct amdgpu_device *adev);
        int (*init)(struct amdgpu_device *adev);
        int (*resume)(struct amdgpu_device *adev);
        void (*stop)(struct amdgpu_device *adev);
        void (*reset)(struct amdgpu_device *adev);
        void (*start)(struct amdgpu_device *adev);
};

struct amdgpu_rlc {
        /* for power gating */
        struct amdgpu_bo *save_restore_obj;
        uint64_t save_restore_gpu_addr;
        volatile uint32_t *sr_ptr;
        const u32 *reg_list;
        u32 reg_list_size;
        /* for clear state */
        struct amdgpu_bo *clear_state_obj;
        uint64_t clear_state_gpu_addr;
        volatile uint32_t *cs_ptr;
        const struct cs_section_def *cs_data;
        u32 clear_state_size;
        /* for cp tables */
        struct amdgpu_bo *cp_table_obj;
        uint64_t cp_table_gpu_addr;
        volatile uint32_t *cp_table_ptr;
        u32 cp_table_size;
        /* safe mode for updating CG/PG state */
        bool in_safe_mode;
        const struct amdgpu_rlc_funcs *funcs;
        /* for firmware data */
        u32 save_and_restore_offset;
        u32 clear_state_descriptor_offset;
        u32 avail_scratch_ram_locations;
        u32 reg_restore_list_size;
        u32 reg_list_format_start;
        u32 reg_list_format_separate_start;
        u32 starting_offsets_start;
        u32 reg_list_format_size_bytes;
        u32 reg_list_size_bytes;
        u32 reg_list_format_direct_reg_list_length;
        u32 save_restore_list_cntl_size_bytes;
        u32 save_restore_list_gpm_size_bytes;
        u32 save_restore_list_srm_size_bytes;
        u32 *register_list_format;
        u32 *register_restore;
        u8 *save_restore_list_cntl;
        u8 *save_restore_list_gpm;
        u8 *save_restore_list_srm;
        bool is_rlc_v2_1;
};
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
#endif
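For reference, a hedged sketch of what a per-ASIC callback table looks like once the .fini hook is gone; the entries mirror the gfx_v6_0 hunk further down, with the untouched remainder of the table elided:

        static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
                .init = gfx_v6_0_rlc_init,
                /* no .fini entry any more; teardown calls amdgpu_gfx_rlc_fini() directly */
                .resume = gfx_v6_0_rlc_resume,
                .stop = gfx_v6_0_rlc_stop,
                .reset = gfx_v6_0_rlc_reset,
                /* remaining callbacks unchanged and elided here */
        };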
@@ -2351,13 +2351,6 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
         amdgpu_ring_write(ring, val);
 }
-static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
-{
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
 static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 {
         const u32 *src_ptr;
@@ -2386,7 +2379,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
         if (r) {
                 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
                          r);
-                adev->gfx.rlc.funcs->fini(adev);
+                amdgpu_gfx_rlc_fini(adev);
                 return r;
         }
@@ -2411,7 +2404,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
                               (void **)&adev->gfx.rlc.cs_ptr);
         if (r) {
                 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-                adev->gfx.rlc.funcs->fini(adev);
+                amdgpu_gfx_rlc_fini(adev);
                 return r;
         }
@@ -3060,7 +3053,6 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
 static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
         .init = gfx_v6_0_rlc_init,
-        .fini = gfx_v6_0_rlc_fini,
         .resume = gfx_v6_0_rlc_resume,
         .stop = gfx_v6_0_rlc_stop,
         .reset = gfx_v6_0_rlc_reset,
@@ -3158,7 +3150,7 @@ static int gfx_v6_0_sw_fini(void *handle)
         for (i = 0; i < adev->gfx.num_compute_rings; i++)
                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
-        adev->gfx.rlc.funcs->fini(adev);
+        amdgpu_gfx_rlc_fini(adev);
         return 0;
 }
...
@@ -3252,13 +3252,6 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
  * The RLC is a multi-purpose microengine that handles a
  * variety of functions.
  */
-static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
-{
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 {
         const u32 *src_ptr;
@@ -3298,7 +3291,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
                               (void **)&adev->gfx.rlc.sr_ptr);
         if (r) {
                 dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
-                adev->gfx.rlc.funcs->fini(adev);
+                amdgpu_gfx_rlc_fini(adev);
                 return r;
         }
@@ -3321,7 +3314,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
                               (void **)&adev->gfx.rlc.cs_ptr);
         if (r) {
                 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-                adev->gfx.rlc.funcs->fini(adev);
+                amdgpu_gfx_rlc_fini(adev);
                 return r;
         }
@@ -3341,7 +3334,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
                               (void **)&adev->gfx.rlc.cp_table_ptr);
         if (r) {
                 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
-                adev->gfx.rlc.funcs->fini(adev);
+                amdgpu_gfx_rlc_fini(adev);
                 return r;
         }
@@ -4275,7 +4268,6 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
         .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
         .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode,
         .init = gfx_v7_0_rlc_init,
-        .fini = gfx_v7_0_rlc_fini,
         .resume = gfx_v7_0_rlc_resume,
         .stop = gfx_v7_0_rlc_stop,
         .reset = gfx_v7_0_rlc_reset,
@@ -4594,7 +4586,7 @@ static int gfx_v7_0_sw_fini(void *handle)
                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
         gfx_v7_0_cp_compute_fini(adev);
-        adev->gfx.rlc.funcs->fini(adev);
+        amdgpu_gfx_rlc_fini(adev);
         gfx_v7_0_mec_fini(adev);
         amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                               &adev->gfx.rlc.clear_state_gpu_addr,
...
@@ -1348,12 +1348,6 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev)
         }
 }
-static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
-{
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
 static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 {
         volatile u32 *dst_ptr;
@@ -1376,7 +1370,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
                               (void **)&adev->gfx.rlc.cs_ptr);
         if (r) {
                 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-                adev->gfx.rlc.funcs->fini(adev);
+                amdgpu_gfx_rlc_fini(adev);
                 return r;
         }
@@ -2166,7 +2160,7 @@ static int gfx_v8_0_sw_fini(void *handle)
         amdgpu_gfx_kiq_fini(adev);
         gfx_v8_0_mec_fini(adev);
-        adev->gfx.rlc.funcs->fini(adev);
+        amdgpu_gfx_rlc_fini(adev);
         amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                               &adev->gfx.rlc.clear_state_gpu_addr,
                               (void **)&adev->gfx.rlc.cs_ptr);
@@ -5634,7 +5628,6 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
         .enter_safe_mode = iceland_enter_rlc_safe_mode,
         .exit_safe_mode = iceland_exit_rlc_safe_mode,
         .init = gfx_v8_0_rlc_init,
-        .fini = gfx_v8_0_rlc_fini,
         .resume = gfx_v8_0_rlc_resume,
         .stop = gfx_v8_0_rlc_stop,
         .reset = gfx_v8_0_rlc_reset,
...
@@ -1112,19 +1112,6 @@ static void rv_init_cp_jump_table(struct amdgpu_device *adev)
         }
 }
-static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
-{
-        /* clear state block */
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
-                              &adev->gfx.rlc.clear_state_gpu_addr,
-                              (void **)&adev->gfx.rlc.cs_ptr);
-        /* jump table block */
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
-                              &adev->gfx.rlc.cp_table_gpu_addr,
-                              (void **)&adev->gfx.rlc.cp_table_ptr);
-}
 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 {
         volatile u32 *dst_ptr;
@@ -1147,7 +1134,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
         if (r) {
                 dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
                         r);
-                adev->gfx.rlc.funcs->fini(adev);
+                amdgpu_gfx_rlc_fini(adev);
                 return r;
         }
         /* set up the cs buffer */
@@ -1169,7 +1156,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
         if (r) {
                 dev_err(adev->dev,
                         "(%d) failed to create cp table bo\n", r);
-                adev->gfx.rlc.funcs->fini(adev);
+                amdgpu_gfx_rlc_fini(adev);
                 return r;
         }
@@ -3884,7 +3871,6 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
         .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
         .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode,
         .init = gfx_v9_0_rlc_init,
-        .fini = gfx_v9_0_rlc_fini,
         .resume = gfx_v9_0_rlc_resume,
         .stop = gfx_v9_0_rlc_stop,
         .reset = gfx_v9_0_rlc_reset,
...