Commit a1873c62 authored by Dave Airlie

Merge branch 'drm-next-4.10' of git://people.freedesktop.org/~agd5f/linux into drm-next

First new feature pull for 4.10.  Highlights:
- Support for multiple virtual displays in the virtual dce component
- New VM mgr to support non-contiguous vram buffers
- Support for UVD powergating on additional asics
- Power management improvements
- lots of code cleanup and bug fixes

* 'drm-next-4.10' of git://people.freedesktop.org/~agd5f/linux: (107 commits)
  drm/amdgpu: turn on/off uvd clock when dpm enable/disable on CI
  drm/amdgpu: disable dpm before turn off clock when vce idle.
  drm/amdgpu: enable uvd bypass mode for CI/VI.
  drm/amdgpu: just not load smc firmware if smu is already running
  drm/amdgpu: when suspend, set boot state instead of disable dpm.
  drm/amdgpu: use failed label to handle context init failure
  drm/amdgpu: add amdgpu_ttm_bo_eviction_valuable callback
  drm/ttm: make eviction decision a driver callback v2
  drm/ttm: fix coding style in ttm_bo_driver.h
  drm/radeon/pm: autoswitch power state when in balanced mode
  drm/amd/powerplay: fix spelling mistake and add KERN_WARNING to printks
  drm/amdgpu:new ids flag for preempt
  drm/amdgpu: mark symbols static where possible
  drm/amdgpu: change function declarations and add missing header dependencies
  drm/amdgpu: s/amdgpuCrtc/amdgpu_crtc/ in pageflip code
  drm/amdgpu/atom: remove a bunch of unused functions
  drm/amdgpu: consolidate atom scratch reg handling for hangs
  drm/amdgpu: use amdgpu_bo_[create|free]_kernel for wb
  drm/amdgpu: add VCE VM session tracking
  drm/amdgpu: improve parse_cs handling a bit
  ...
parents 5481e27f 3495a103
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-amdgpu_gtt_mgr.o
+amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
...
@@ -90,7 +90,6 @@
 #define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
-#define ENCODER_OBJECT_ID_VIRTUAL 0x28
 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@@ -120,7 +119,6 @@
 #define CONNECTOR_OBJECT_ID_eDP 0x14
 #define CONNECTOR_OBJECT_ID_MXM 0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
-#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
 /* deleted */
@@ -149,7 +147,6 @@
 #define GRAPH_OBJECT_ENUM_ID5 0x05
 #define GRAPH_OBJECT_ENUM_ID6 0x06
 #define GRAPH_OBJECT_ENUM_ID7 0x07
-#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
 /****************************************************/
 /* Graphics Object ID Bit definition */
@@ -411,10 +408,6 @@
 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
 ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
-#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
-ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
...
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-const struct amdgpu_ip_block_version *ip_version =
+const struct amdgpu_ip_block *ip_block =
 amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
-if (!ip_version)
+if (!ip_block)
 return -EINVAL;
 r = amd_acp_hw_init(adev->acp.cgs_device,
-ip_version->major, ip_version->minor);
+ip_block->version->major, ip_block->version->minor);
 /* -ENODEV means board uses AZ rather than ACP */
 if (r == -ENODEV)
 return 0;
@@ -456,7 +456,7 @@ static int acp_set_powergating_state(void *handle,
 return 0;
 }
-const struct amd_ip_funcs acp_ip_funcs = {
+static const struct amd_ip_funcs acp_ip_funcs = {
 .name = "acp_ip",
 .early_init = acp_early_init,
 .late_init = NULL,
@@ -472,3 +472,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
 .set_clockgating_state = acp_set_clockgating_state,
 .set_powergating_state = acp_set_powergating_state,
 };
+const struct amdgpu_ip_block_version acp_ip_block =
+{
+.type = AMD_IP_BLOCK_TYPE_ACP,
+.major = 2,
+.minor = 2,
+.rev = 0,
+.funcs = &acp_ip_funcs,
+};
@@ -37,6 +37,6 @@ struct amdgpu_acp {
 struct acp_pm_domain *acp_genpd;
 };
-extern const struct amd_ip_funcs acp_ip_funcs;
+extern const struct amdgpu_ip_block_version acp_ip_block;
 #endif /* __AMDGPU_ACP_H__ */
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 return 0;
 }
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
-{
-GET_ENGINE_CLOCK_PS_ALLOCATION args;
-int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
-amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-return le32_to_cpu(args.ulReturnEngineClock);
-}
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
-{
-GET_MEMORY_CLOCK_PS_ALLOCATION args;
-int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
-amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-return le32_to_cpu(args.ulReturnMemoryClock);
-}
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
-uint32_t eng_clock)
-{
-SET_ENGINE_CLOCK_PS_ALLOCATION args;
-int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
-args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
-amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
-uint32_t mem_clock)
-{
-SET_MEMORY_CLOCK_PS_ALLOCATION args;
-int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
-if (adev->flags & AMD_IS_APU)
-return;
-args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
-amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
 u32 eng_clock, u32 mem_clock)
 {
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
 return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
 }
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
-u16 voltage_level,
-u8 voltage_type)
-{
-union set_voltage args;
-int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
-u8 frev, crev, volt_index = voltage_level;
-if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
-return;
-/* 0xff01 is a flag rather then an actual voltage */
-if (voltage_level == 0xff01)
-return;
-switch (crev) {
-case 1:
-args.v1.ucVoltageType = voltage_type;
-args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
-args.v1.ucVoltageIndex = volt_index;
-break;
-case 2:
-args.v2.ucVoltageType = voltage_type;
-args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
-args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
-break;
-case 3:
-args.v3.ucVoltageType = voltage_type;
-args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
-args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
-break;
-default:
-DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-return;
-}
-amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
 int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
 u16 *leakage_id)
 {
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
 WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
 }
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+bool hung)
+{
+u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+if (hung)
+tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+else
+tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+WREG32(mmBIOS_SCRATCH_3, tmp);
+}
 /* Atom needs data in little endian format
 * so swap as appropriate when copying data to
 * or from atom. Note that atom operates on
...
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 bool strobe_mode,
 struct atom_mpll_param *mpll_param);
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
-uint32_t eng_clock);
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
-uint32_t mem_clock);
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
-u16 voltage_level,
-u8 voltage_type);
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
 u32 eng_clock, u32 mem_clock);
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
 void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+bool hung);
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
 int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
...
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 switch(type) {
 case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
 case CGS_GPU_MEM_TYPE__VISIBLE_FB:
-flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 domain = AMDGPU_GEM_DOMAIN_VRAM;
 if (max_offset > adev->mc.real_vram_size)
 return -EINVAL;
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 break;
 case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
 case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
-flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 domain = AMDGPU_GEM_DOMAIN_VRAM;
 if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
 place.fpfn =
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
 r = amdgpu_bo_reserve(obj, false);
 if (unlikely(r != 0))
 return r;
-r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
 min_offset, max_offset, mcaddr);
 amdgpu_bo_unreserve(obj);
 return r;
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
 int i, r = -1;
 for (i = 0; i < adev->num_ip_blocks; i++) {
-if (!adev->ip_block_status[i].valid)
+if (!adev->ip_blocks[i].status.valid)
 continue;
-if (adev->ip_blocks[i].type == block_type) {
-r = adev->ip_blocks[i].funcs->set_clockgating_state(
+if (adev->ip_blocks[i].version->type == block_type) {
+r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
 (void *)adev,
 state);
 break;
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
 int i, r = -1;
 for (i = 0; i < adev->num_ip_blocks; i++) {
-if (!adev->ip_block_status[i].valid)
+if (!adev->ip_blocks[i].status.valid)
 continue;
-if (adev->ip_blocks[i].type == block_type) {
-r = adev->ip_blocks[i].funcs->set_powergating_state(
+if (adev->ip_blocks[i].version->type == block_type) {
+r = adev->ip_blocks[i].version->funcs->set_powergating_state(
 (void *)adev,
 state);
 break;
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 result = AMDGPU_UCODE_ID_CP_MEC1;
 break;
 case CGS_UCODE_ID_CP_MEC_JT2:
-if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
-|| adev->asic_type == CHIP_POLARIS10)
-result = AMDGPU_UCODE_ID_CP_MEC2;
-else
-result = AMDGPU_UCODE_ID_CP_MEC1;
+/* for VI. JT2 should be the same as JT1, because:
+1, MEC2 and MEC1 use exactly same FW.
+2, JT2 is not pached but JT1 is.
+*/
+if (adev->asic_type >= CHIP_TOPAZ)
+result = AMDGPU_UCODE_ID_CP_MEC1;
+else
+result = AMDGPU_UCODE_ID_CP_MEC2;
 break;
 case CGS_UCODE_ID_RLC_G:
 result = AMDGPU_UCODE_ID_RLC_G;
 break;
+case CGS_UCODE_ID_STORAGE:
+result = AMDGPU_UCODE_ID_STORAGE;
+break;
 default:
 DRM_ERROR("Firmware type not supported\n");
 }
@@ -776,12 +784,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
 (type == CGS_UCODE_ID_CP_MEC_JT2)) {
-gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
 data_size = le32_to_cpu(header->jt_size) << 2;
 }
-info->mc_addr = gpu_addr;
+info->kptr = ucode->kaddr;
 info->image_size = data_size;
+info->mc_addr = gpu_addr;
 info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+if (CGS_UCODE_ID_CP_MEC == type)
+info->image_size = (header->jt_offset) << 2;
 info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
 info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
 } else {
@@ -851,6 +865,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 return 0;
 }
+static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
+{
+CGS_FUNC_ADEV;
+return amdgpu_sriov_vf(adev);
+}
 static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
 struct cgs_system_info *sys_info)
 {
@@ -1204,6 +1224,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 amdgpu_cgs_notify_dpm_enabled,
 amdgpu_cgs_call_acpi_method,
 amdgpu_cgs_query_system_info,
+amdgpu_cgs_is_virtualization_enabled
 };
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
...
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
 .force = amdgpu_connector_dvi_force,
 };
-static struct drm_encoder *
-amdgpu_connector_virtual_encoder(struct drm_connector *connector)
-{
-int enc_id = connector->encoder_ids[0];
-struct drm_encoder *encoder;
-int i;
-for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-if (connector->encoder_ids[i] == 0)
-break;
-encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
-if (!encoder)
-continue;
-if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
-return encoder;
-}
-/* pick the first one */
-if (enc_id)
-return drm_encoder_find(connector->dev, enc_id);
-return NULL;
-}
-static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
-{
-struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
-if (encoder) {
-amdgpu_connector_add_common_modes(encoder, connector);
-}
-return 0;
-}
-static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
-struct drm_display_mode *mode)
-{
-return MODE_OK;
-}
-static int
-amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
-{
-return 0;
-}
-static enum drm_connector_status
-amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
-{
-return connector_status_connected;
-}
-static int
-amdgpu_connector_virtual_set_property(struct drm_connector *connector,
-struct drm_property *property,
-uint64_t val)
-{
-return 0;
-}
-static void amdgpu_connector_virtual_force(struct drm_connector *connector)
-{
-return;
-}
-static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
-.get_modes = amdgpu_connector_virtual_get_modes,
-.mode_valid = amdgpu_connector_virtual_mode_valid,
-.best_encoder = amdgpu_connector_virtual_encoder,
-};
-static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
-.dpms = amdgpu_connector_virtual_dpms,
-.detect = amdgpu_connector_virtual_detect,
-.fill_modes = drm_helper_probe_single_connector_modes,
-.set_property = amdgpu_connector_virtual_set_property,
-.destroy = amdgpu_connector_destroy,
-.force = amdgpu_connector_virtual_force,
-};
 void
 amdgpu_connector_add(struct amdgpu_device *adev,
 uint32_t connector_id,
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 connector->interlace_allowed = false;
 connector->doublescan_allowed = false;
 break;
-case DRM_MODE_CONNECTOR_VIRTUAL:
-amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
-if (!amdgpu_dig_connector)
-goto failed;
-amdgpu_connector->con_priv = amdgpu_dig_connector;
-drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
-drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
-subpixel_order = SubPixelHorizontalRGB;
-connector->interlace_allowed = false;
-connector->doublescan_allowed = false;
-break;
 }
 }
...
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
 static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 struct amdgpu_bo *bo)
 {
+struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 u64 initial_bytes_moved;
 uint32_t domain;
 int r;
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 retry:
 amdgpu_ttm_placement_from_domain(bo, domain);
-initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 initial_bytes_moved;
 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -387,9 +388,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 /* Last resort, try to evict something from the current working set */
 static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
-struct amdgpu_bo_list_entry *lobj)
+struct amdgpu_bo *validated)
 {
-uint32_t domain = lobj->robj->allowed_domains;
+uint32_t domain = validated->allowed_domains;
 int r;
 if (!p->evictable)
@@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 struct amdgpu_bo_list_entry *candidate = p->evictable;
 struct amdgpu_bo *bo = candidate->robj;
+struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 u64 initial_bytes_moved;
 uint32_t other;
 /* If we reached our current BO we can forget it */
-if (candidate == lobj)
+if (candidate->robj == validated)
 break;
 other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 /* Good we can try to move this BO somewhere else */
 amdgpu_ttm_placement_from_domain(bo, other);
-initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 initial_bytes_moved;
 if (unlikely(r))
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 return false;
 }
+static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
+{
+struct amdgpu_cs_parser *p = param;
+int r;
+do {
+r = amdgpu_cs_bo_validate(p, bo);
+} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+if (r)
+return r;
+if (bo->shadow)
+r = amdgpu_cs_bo_validate(p, bo);
+return r;
+}
 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 struct list_head *validated)
 {
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 if (p->evictable == lobj)
 p->evictable = NULL;
-do {
-r = amdgpu_cs_bo_validate(p, bo);
-} while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+r = amdgpu_cs_validate(p, bo);
 if (r)
 return r;
-if (bo->shadow) {
-r = amdgpu_cs_bo_validate(p, bo);
-if (r)
-return r;
-}
 if (binding_userptr) {
 drm_free_large(lobj->user_pages);
 lobj->user_pages = NULL;
@@ -593,14 +604,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 list_splice(&need_pages, &p->validated);
 }
-amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
 p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 p->bytes_moved = 0;
 p->evictable = list_last_entry(&p->validated,
 struct amdgpu_bo_list_entry,
 tv.head);
+r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
+amdgpu_cs_validate, p);
+if (r) {
+DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+goto error_validate;
+}
 r = amdgpu_cs_list_validate(p, &duplicates);
 if (r) {
 DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
@@ -806,13 +822,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 /* Only for UVD/VCE VM emulation */
 if (ring->funcs->parse_cs) {
+p->job->vm = NULL;
 for (i = 0; i < p->job->num_ibs; i++) {
 r = amdgpu_ring_parse_cs(ring, p, i);
 if (r)
 return r;
 }
-} else {
+}
+if (p->job->vm) {
 p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 r = amdgpu_bo_vm_update_pte(p, vm);
@@ -901,7 +918,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
 kptr += chunk_ib->va_start - offset;
-r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
+r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
 if (r) {
 DRM_ERROR("Failed to get ib !\n");
 return r;
@@ -916,9 +933,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 return r;
 }
-ib->gpu_addr = chunk_ib->va_start;
 }
+ib->gpu_addr = chunk_ib->va_start;
 ib->length_dw = chunk_ib->ib_bytes / 4;
 ib->flags = chunk_ib->flags;
 j++;
@@ -926,8 +943,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 /* UVD & VCE fw doesn't support user fences */
 if (parser->job->uf_addr && (
-parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
-parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 return -EINVAL;
 return 0;
@@ -1195,6 +1212,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
 r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
 if (unlikely(r))
 return r;
+if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+continue;
+bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
+r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+if (unlikely(r))
+return r;
 }
 return 0;
...
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
 rq, amdgpu_sched_jobs);
 if (r)
-break;
+goto failed;
 }
-if (i < adev->num_rings) {
-for (j = 0; j < i; j++)
-amd_sched_entity_fini(&adev->rings[j]->sched,
-&ctx->rings[j].entity);
-kfree(ctx->fences);
-ctx->fences = NULL;
-return r;
-}
 return 0;
+failed:
+for (j = 0; j < i; j++)
+amd_sched_entity_fini(&adev->rings[j]->sched,
+&ctx->rings[j].entity);
+kfree(ctx->fences);
+ctx->fences = NULL;
+return r;
 }
 static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
...
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 struct amdgpu_flip_work *work =
 container_of(delayed_work, struct amdgpu_flip_work, flip_work);
 struct amdgpu_device *adev = work->adev;
-struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
-struct drm_crtc *crtc = &amdgpuCrtc->base;
+struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
+struct drm_crtc *crtc = &amdgpu_crtc->base;
 unsigned long flags;
 unsigned i;
 int vpos, hpos;
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 /* Wait until we're out of the vertical blank period before the one
 * targeted by the flip
 */
-if (amdgpuCrtc->enabled &&
+if (amdgpu_crtc->enabled &&
 (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
 &vpos, &hpos, NULL, NULL,
 &crtc->hwmode)
 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
 (int)(work->target_vblank -
-amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
 schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
 return;
 }
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
 /* Set the flip status */
-amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
-amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+amdgpu_crtc->crtc_id, amdgpu_crtc, work);
 }
...
@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
 ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
 }
-for (i = 0; i < states->numEntries; i++) {
-if (i >= AMDGPU_MAX_VCE_LEVELS)
-break;
+adev->pm.dpm.num_of_vce_states =
+states->numEntries > AMD_MAX_VCE_LEVELS ?
+AMD_MAX_VCE_LEVELS : states->numEntries;
+for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 vce_clk = (VCEClockInfo *)
 ((u8 *)&array->entries[0] +
 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
 return encoded_lanes[lanes];
 }
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+{
+if (idx < adev->pm.dpm.num_of_vce_states)
+return &adev->pm.dpm.vce_states[idx];
+return NULL;
+}
@@ -58,9 +58,10 @@
 * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
 * - 3.7.0 - Add support for VCE clock list packet
 * - 3.8.0 - Add support raster config init in the kernel
+* - 3.9.0 - Add support for memory query info about VRAM and GTT.
 */
 #define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 8
+#define KMS_DRIVER_MINOR 9
 #define KMS_DRIVER_PATCHLEVEL 0
 int amdgpu_vram_limit = 0;
@@ -85,6 +86,7 @@ int amdgpu_vm_size = 64;
 int amdgpu_vm_block_size = -1;
 int amdgpu_vm_fault_stop = 0;
 int amdgpu_vm_debug = 0;
+int amdgpu_vram_page_split = 1024;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
 MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
 module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
+module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
 MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
 module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
-MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+MODULE_PARM_DESC(virtual_display,
+"Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
 module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
 static const struct pci_device_id pciidlist[] = {
@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = {
 {0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 /* fiji */
 {0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
+{0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
 /* carrizo */
 {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
 {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
...
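The KMS_DRIVER_MINOR bump to 9 above is what userspace can key off before relying on the new memory query interface added later in this series. A minimal sketch of such a check (not part of this pull; it assumes libdrm's drmOpen/drmGetVersion helpers and that the reported DRM version is the amdgpu KMS version, not libdrm's):

/* build roughly with: cc check_ver.c -o check_ver $(pkg-config --cflags --libs libdrm) */
#include <stdio.h>
#include <xf86drm.h>

/* Return nonzero if the amdgpu KMS interface is at least 3.9. */
static int amdgpu_has_memory_query(int fd)
{
        drmVersionPtr ver = drmGetVersion(fd);
        int ok;

        if (!ver)
                return 0;
        ok = ver->version_major > 3 ||
             (ver->version_major == 3 && ver->version_minor >= 9);
        drmFreeVersion(ver);
        return ok;
}

int main(void)
{
        int fd = drmOpen("amdgpu", NULL);

        if (fd < 0) {
                fprintf(stderr, "no amdgpu device found\n");
                return 1;
        }
        printf("AMDGPU_INFO_MEMORY query %s\n",
               amdgpu_has_memory_query(fd) ? "available" : "not available");
        drmClose(fd);
        return 0;
}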
@@ -152,7 +152,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 aligned_size = ALIGN(size, PAGE_SIZE);
 ret = amdgpu_gem_object_create(adev, aligned_size, 0,
 AMDGPU_GEM_DOMAIN_VRAM,
-AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 true, &gobj);
 if (ret) {
 printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
...
@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 if (adev->gart.robj == NULL) {
 r = amdgpu_bo_create(adev, adev->gart.table_size,
 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 NULL, NULL, &adev->gart.robj);
 if (r) {
 return r;
...
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
 * Call from drm_gem_handle_create which appear in both new and open ioctl
 * case.
 */
-int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+struct drm_file *file_priv)
 {
 struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
-struct amdgpu_device *adev = abo->adev;
+struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
 struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 struct amdgpu_vm *vm = &fpriv->vm;
 struct amdgpu_bo_va *bo_va;
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 struct drm_file *file_priv)
 {
 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-struct amdgpu_device *adev = bo->adev;
+struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 struct amdgpu_vm *vm = &fpriv->vm;
@@ -468,6 +469,16 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 return r;
 }
+static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
+{
+unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+/* if anything is swapped out don't swap it in here,
+just abort and wait for the next CS */
+return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+}
 /**
 * amdgpu_gem_va_update_vm -update the bo_va in its VM
 *
@@ -478,7 +489,8 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 * vital here, so they are not reported back to userspace.
 */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
-struct amdgpu_bo_va *bo_va, uint32_t operation)
+struct amdgpu_bo_va *bo_va,
+uint32_t operation)
 {
 struct ttm_validate_buffer tv, *entry;
 struct amdgpu_bo_list_entry vm_pd;
@@ -501,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 if (r)
 goto error_print;
-amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
 list_for_each_entry(entry, &list, head) {
 domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
 /* if anything is swapped out don't swap it in here,
@@ -509,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 if (domain == AMDGPU_GEM_DOMAIN_CPU)
 goto error_unreserve;
 }
-list_for_each_entry(entry, &duplicates, head) {
-domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
-/* if anything is swapped out don't swap it in here,
-just abort and wait for the next CS */
-if (domain == AMDGPU_GEM_DOMAIN_CPU)
-goto error_unreserve;
-}
+r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
+NULL);
+if (r)
+goto error_unreserve;
 r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 if (r)
@@ -536,8 +544,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 struct drm_file *filp)
 {
@@ -547,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 struct amdgpu_fpriv *fpriv = filp->driver_priv;
 struct amdgpu_bo *abo;
 struct amdgpu_bo_va *bo_va;
-struct ttm_validate_buffer tv, tv_pd;
+struct amdgpu_bo_list_entry vm_pd;
+struct ttm_validate_buffer tv;
 struct ww_acquire_ctx ticket;
 struct list_head list, duplicates;
 uint32_t invalid_flags, va_flags = 0;
@@ -592,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 tv.shared = true;
 list_add(&tv.head, &list);
-tv_pd.bo = &fpriv->vm.page_directory->tbo;
-tv_pd.shared = true;
-list_add(&tv_pd.head, &list);
+amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 if (r) {
...
@@ -24,6 +24,7 @@
 */
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_gfx.h"
 /*
 * GPU scratch registers helpers function.
...
@@ -27,6 +27,7 @@
 int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
 void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
-unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
+unsigned max_sh);
 #endif
@@ -168,6 +168,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 return -ENOMEM;
 node->start = AMDGPU_BO_INVALID_OFFSET;
+node->size = mem->num_pages;
 mem->mm_node = node;
 if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
...
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 return -EINVAL;
 }
-alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
-num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+alloc_size = ring->funcs->emit_frame_size + num_ibs *
+ring->funcs->emit_ib_size;
 r = amdgpu_ring_alloc(ring, alloc_size);
 if (r) {
@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 return r;
 }
-if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+if (ring->funcs->init_cond_exec)
 patch_offset = amdgpu_ring_init_cond_exec(ring);
 if (vm) {
...
@@ -306,10 +306,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 }
 for (i = 0; i < adev->num_ip_blocks; i++) {
-if (adev->ip_blocks[i].type == type &&
-adev->ip_block_status[i].valid) {
-ip.hw_ip_version_major = adev->ip_blocks[i].major;
-ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+if (adev->ip_blocks[i].version->type == type &&
+adev->ip_blocks[i].status.valid) {
+ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
 ip.capabilities_flags = 0;
 ip.available_rings = ring_mask;
 ip.ib_start_alignment = ib_start_alignment;
@@ -345,8 +345,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 }
 for (i = 0; i < adev->num_ip_blocks; i++)
-if (adev->ip_blocks[i].type == type &&
-adev->ip_block_status[i].valid &&
+if (adev->ip_blocks[i].version->type == type &&
+adev->ip_blocks[i].status.valid &&
 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
 count++;
@@ -411,6 +411,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 return copy_to_user(out, &vram_gtt,
 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
 }
+case AMDGPU_INFO_MEMORY: {
+struct drm_amdgpu_memory_info mem;
+memset(&mem, 0, sizeof(mem));
+mem.vram.total_heap_size = adev->mc.real_vram_size;
+mem.vram.usable_heap_size =
+adev->mc.real_vram_size - adev->vram_pin_size;
+mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+mem.cpu_accessible_vram.total_heap_size =
+adev->mc.visible_vram_size;
+mem.cpu_accessible_vram.usable_heap_size =
+adev->mc.visible_vram_size -
+(adev->vram_pin_size - adev->invisible_pin_size);
+mem.cpu_accessible_vram.heap_usage =
+atomic64_read(&adev->vram_vis_usage);
+mem.cpu_accessible_vram.max_allocation =
+mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+mem.gtt.total_heap_size = adev->mc.gtt_size;
+mem.gtt.usable_heap_size =
+adev->mc.gtt_size - adev->gart_pin_size;
+mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+return copy_to_user(out, &mem,
+min((size_t)size, sizeof(mem)))
+? -EFAULT : 0;
+}
 case AMDGPU_INFO_READ_MMR_REG: {
 unsigned n, alloc_size;
 uint32_t *regs;
@@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 dev_info.ids_flags = 0;
 if (adev->flags & AMD_IS_APU)
 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+if (amdgpu_sriov_vf(adev))
+dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
 dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
 dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
@@ -494,6 +526,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 return copy_to_user(out, &dev_info,
 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
 }
+case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+unsigned i;
+struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+struct amd_vce_state *vce_state;
+for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+if (vce_state) {
+vce_clk_table.entries[i].sclk = vce_state->sclk;
+vce_clk_table.entries[i].mclk = vce_state->mclk;
+vce_clk_table.entries[i].eclk = vce_state->evclk;
+vce_clk_table.num_valid_entries++;
+}
+}
+return copy_to_user(out, &vce_clk_table,
+min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+}
 default:
 DRM_DEBUG_KMS("Invalid request %d\n", info->query);
 return -EINVAL;
...
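The new AMDGPU_INFO_MEMORY case above fills a struct drm_amdgpu_memory_info with per-heap totals and current usage for VRAM, CPU-visible VRAM and GTT. A hedged userspace sketch of issuing that query directly through DRM_IOCTL_AMDGPU_INFO follows; it assumes a libdrm/kernel header set that already carries these definitions (amdgpu_drm.h with AMDGPU_INFO_MEMORY and the memory_info structs), and error handling is trimmed:

/* build roughly with: cc meminfo.c -o meminfo $(pkg-config --cflags --libs libdrm) */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>   /* struct drm_amdgpu_info, struct drm_amdgpu_memory_info */

/* Fill *mem via the AMDGPU_INFO_MEMORY query; returns 0 on success. */
static int query_memory_info(int fd, struct drm_amdgpu_memory_info *mem)
{
        struct drm_amdgpu_info request;

        memset(&request, 0, sizeof(request));
        memset(mem, 0, sizeof(*mem));
        request.return_pointer = (uintptr_t)mem;
        request.return_size = sizeof(*mem);
        request.query = AMDGPU_INFO_MEMORY;

        return drmIoctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
}

int main(void)
{
        struct drm_amdgpu_memory_info mem;
        int fd = drmOpen("amdgpu", NULL);

        if (fd < 0 || query_memory_info(fd, &mem)) {
                fprintf(stderr, "memory query failed\n");
                return 1;
        }
        printf("VRAM: %llu total, %llu usable, %llu in use\n",
               (unsigned long long)mem.vram.total_heap_size,
               (unsigned long long)mem.vram.usable_heap_size,
               (unsigned long long)mem.vram.heap_usage);
        printf("GTT:  %llu total, %llu usable, %llu in use\n",
               (unsigned long long)mem.gtt.total_heap_size,
               (unsigned long long)mem.gtt.usable_heap_size,
               (unsigned long long)mem.gtt.heap_usage);
        drmClose(fd);
        return 0;
}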
@@ -285,7 +285,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 {
 unsigned long end = addr + amdgpu_bo_size(bo) - 1;
-struct amdgpu_device *adev = bo->adev;
+struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 struct amdgpu_mn *rmn;
 struct amdgpu_mn_node *node = NULL;
 struct list_head bos;
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 */
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
-struct amdgpu_device *adev = bo->adev;
+struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 struct amdgpu_mn *rmn;
 struct list_head *head;
...
@@ -341,8 +341,6 @@ struct amdgpu_mode_info {
 int num_dig; /* number of dig blocks */
 int disp_priority;
 const struct amdgpu_display_funcs *funcs;
-struct hrtimer vblank_timer;
-enum amdgpu_interrupt_state vsync_timer_enabled;
 };
 #define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -413,6 +411,9 @@ struct amdgpu_crtc {
 u32 wm_high;
 u32 lb_vblank_lead_lines;
 struct drm_display_mode hw_mode;
+/* for virtual dce */
+struct hrtimer vblank_timer;
+enum amdgpu_interrupt_state vsync_timer_enabled;
 };
 struct amdgpu_encoder_atom_dig {
...
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
+struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 struct amdgpu_bo *bo;
 bo = container_of(tbo, struct amdgpu_bo, tbo);
-amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
 drm_gem_object_release(&bo->gem_base);
 amdgpu_bo_unref(&bo->parent);
 if (!list_empty(&bo->shadow_list)) {
-mutex_lock(&bo->adev->shadow_list_lock);
+mutex_lock(&adev->shadow_list_lock);
 list_del_init(&bo->shadow_list);
-mutex_unlock(&bo->adev->shadow_list_lock);
+mutex_unlock(&adev->shadow_list_lock);
 }
 kfree(bo->metadata);
 kfree(bo);
@@ -121,12 +122,17 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+unsigned lpfn = 0;
+/* This forces a reallocation if the flag wasn't set before */
+if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
 if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
 !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
 adev->mc.visible_vram_size < adev->mc.real_vram_size) {
 places[c].fpfn = visible_pfn;
-places[c].lpfn = 0;
+places[c].lpfn = lpfn;
 places[c].flags = TTM_PL_FLAG_WC |
 TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
 TTM_PL_FLAG_TOPDOWN;
@@ -134,7 +140,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 }
 places[c].fpfn = 0;
-places[c].lpfn = 0;
+places[c].lpfn = lpfn;
 places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 TTM_PL_FLAG_VRAM;
 if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
@@ -205,8 +211,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
-amdgpu_ttm_placement_init(abo->adev, &abo->placement,
-abo->placements, domain, abo->flags);
+struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
+domain, abo->flags);
 }
 static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -245,7 +253,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 int r;
 r = amdgpu_bo_create(adev, size, align, true, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, bo_ptr); NULL, NULL, bo_ptr);
if (r) { if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
...@@ -351,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, ...@@ -351,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
kfree(bo); kfree(bo);
return r; return r;
} }
bo->adev = adev;
INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va); INIT_LIST_HEAD(&bo->va);
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
...@@ -616,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, ...@@ -616,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset, u64 min_offset, u64 max_offset,
u64 *gpu_addr) u64 *gpu_addr)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i; int r, i;
unsigned fpfn, lpfn; unsigned fpfn, lpfn;
...@@ -643,18 +652,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, ...@@ -643,18 +652,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0; return 0;
} }
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(bo, domain); amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) { for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */ /* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
(!max_offset || max_offset > (!max_offset || max_offset >
bo->adev->mc.visible_vram_size)) { adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset > if (WARN_ON_ONCE(min_offset >
bo->adev->mc.visible_vram_size)) adev->mc.visible_vram_size))
return -EINVAL; return -EINVAL;
fpfn = min_offset >> PAGE_SHIFT; fpfn = min_offset >> PAGE_SHIFT;
lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT; lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
} else { } else {
fpfn = min_offset >> PAGE_SHIFT; fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT; lpfn = max_offset >> PAGE_SHIFT;
...@@ -669,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, ...@@ -669,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) { if (unlikely(r)) {
dev_err(bo->adev->dev, "%p pin failed\n", bo); dev_err(adev->dev, "%p pin failed\n", bo);
goto error; goto error;
} }
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r)) { if (unlikely(r)) {
dev_err(bo->adev->dev, "%p bind failed\n", bo); dev_err(adev->dev, "%p bind failed\n", bo);
goto error; goto error;
} }
...@@ -682,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, ...@@ -682,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (gpu_addr != NULL) if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo); *gpu_addr = amdgpu_bo_gpu_offset(bo);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) { if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
bo->adev->vram_pin_size += amdgpu_bo_size(bo); adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
bo->adev->invisible_pin_size += amdgpu_bo_size(bo); adev->invisible_pin_size += amdgpu_bo_size(bo);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) { } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
bo->adev->gart_pin_size += amdgpu_bo_size(bo); adev->gart_pin_size += amdgpu_bo_size(bo);
} }
error: error:
...@@ -700,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr) ...@@ -700,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
int amdgpu_bo_unpin(struct amdgpu_bo *bo) int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i; int r, i;
if (!bo->pin_count) { if (!bo->pin_count) {
dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo); dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0; return 0;
} }
bo->pin_count--; bo->pin_count--;
...@@ -715,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) ...@@ -715,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
} }
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) { if (unlikely(r)) {
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error; goto error;
} }
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
bo->adev->vram_pin_size -= amdgpu_bo_size(bo); adev->vram_pin_size -= amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); adev->invisible_pin_size -= amdgpu_bo_size(bo);
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) { } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
bo->adev->gart_pin_size -= amdgpu_bo_size(bo); adev->gart_pin_size -= amdgpu_bo_size(bo);
} }
error: error:
...@@ -849,6 +861,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, ...@@ -849,6 +861,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem) struct ttm_mem_reg *new_mem)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo; struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg *old_mem = &bo->mem;
...@@ -856,21 +869,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, ...@@ -856,21 +869,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return; return;
abo = container_of(bo, struct amdgpu_bo, tbo); abo = container_of(bo, struct amdgpu_bo, tbo);
amdgpu_vm_bo_invalidate(abo->adev, abo); amdgpu_vm_bo_invalidate(adev, abo);
/* update statistics */ /* update statistics */
if (!new_mem) if (!new_mem)
return; return;
/* move_notify is called before move happens */ /* move_notify is called before move happens */
amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem); amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type); trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
} }
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{ {
struct amdgpu_device *adev; struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo; struct amdgpu_bo *abo;
unsigned long offset, size, lpfn; unsigned long offset, size, lpfn;
int i, r; int i, r;
...@@ -879,13 +892,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) ...@@ -879,13 +892,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0; return 0;
abo = container_of(bo, struct amdgpu_bo, tbo); abo = container_of(bo, struct amdgpu_bo, tbo);
adev = abo->adev;
if (bo->mem.mem_type != TTM_PL_VRAM) if (bo->mem.mem_type != TTM_PL_VRAM)
return 0; return 0;
size = bo->mem.num_pages << PAGE_SHIFT; size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT; offset = bo->mem.start << PAGE_SHIFT;
if ((offset + size) <= adev->mc.visible_vram_size) /* TODO: figure out how to map scattered VRAM to the CPU */
if ((offset + size) <= adev->mc.visible_vram_size &&
(abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
return 0; return 0;
/* Can't move a pinned BO to visible VRAM */ /* Can't move a pinned BO to visible VRAM */
...@@ -893,6 +907,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) ...@@ -893,6 +907,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return -EINVAL; return -EINVAL;
/* hurrah the memory is not visible ! */ /* hurrah the memory is not visible ! */
abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) { for (i = 0; i < abo->placement.num_placement; i++) {
...@@ -954,6 +969,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) ...@@ -954,6 +969,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count); !bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return bo->tbo.offset; return bo->tbo.offset;
} }
...@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type) ...@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
*/ */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r; int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) { if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) if (r != -ERESTARTSYS)
dev_err(bo->adev->dev, "%p reserve failed\n", bo); dev_err(adev->dev, "%p reserve failed\n", bo);
return r; return r;
} }
return 0; return 0;
......
...@@ -986,10 +986,10 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, ...@@ -986,10 +986,10 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{ {
int i;
struct amdgpu_ps *ps; struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state; enum amd_pm_state_type dpm_state;
int ret; int ret;
bool equal;
/* if dpm init failed */ /* if dpm init failed */
if (!adev->pm.dpm_enabled) if (!adev->pm.dpm_enabled)
...@@ -1009,46 +1009,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) ...@@ -1009,46 +1009,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else else
return; return;
/* no need to reprogram if nothing changed unless we are on BTC+ */
if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
/* vce just modifies an existing state so force a change */
if (ps->vce_active != adev->pm.dpm.vce_active)
goto force;
if (adev->flags & AMD_IS_APU) {
/* for APUs if the num crtcs changed but state is the same,
* all we need to do is update the display configuration.
*/
if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
/* update displays */
amdgpu_dpm_display_configuration_changed(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
}
return;
} else {
/* for BTC+ if the num crtcs hasn't changed and state is the same,
* nothing to do, if the num crtcs is > 1 and state is the same,
* update display configuration.
*/
if (adev->pm.dpm.new_active_crtcs ==
adev->pm.dpm.current_active_crtcs) {
return;
} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
(adev->pm.dpm.new_active_crtc_count > 1)) {
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
/* update displays */
amdgpu_dpm_display_configuration_changed(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
return;
}
}
}
force:
if (amdgpu_dpm == 1) { if (amdgpu_dpm == 1) {
printk("switching from power state:\n"); printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
...@@ -1059,31 +1019,21 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) ...@@ -1059,31 +1019,21 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
/* update whether vce is active */ /* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active; ps->vce_active = adev->pm.dpm.vce_active;
amdgpu_dpm_display_configuration_changed(adev);
ret = amdgpu_dpm_pre_set_power_state(adev); ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret) if (ret)
return; return;
/* update display watermarks based on new power state */ if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
amdgpu_display_bandwidth_update(adev); equal = false;
/* wait for the rings to drain */ if (equal)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) { return;
struct amdgpu_ring *ring = adev->rings[i];
if (ring && ring->ready)
amdgpu_fence_wait_empty(ring);
}
/* program the new power state */
amdgpu_dpm_set_power_state(adev); amdgpu_dpm_set_power_state(adev);
/* update current power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
amdgpu_dpm_post_set_power_state(adev); amdgpu_dpm_post_set_power_state(adev);
/* update displays */
amdgpu_dpm_display_configuration_changed(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
...@@ -1135,7 +1085,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) ...@@ -1135,7 +1085,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = true; adev->pm.dpm.vce_active = true;
/* XXX select vce level based on ring/task */ /* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex); mutex_unlock(&adev->pm.mutex);
} else { } else {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
...@@ -1276,20 +1226,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) ...@@ -1276,20 +1226,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct drm_device *ddev = adev->ddev; struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc; struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc; struct amdgpu_crtc *amdgpu_crtc;
int i = 0;
if (!adev->pm.dpm_enabled) if (!adev->pm.dpm_enabled)
return; return;
if (adev->pp_enabled) { amdgpu_display_bandwidth_update(adev);
int i = 0;
amdgpu_display_bandwidth_update(adev); for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i];
struct amdgpu_ring *ring = adev->rings[i]; if (ring && ring->ready)
if (ring && ring->ready) amdgpu_fence_wait_empty(ring);
amdgpu_fence_wait_empty(ring); }
}
if (adev->pp_enabled) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else { } else {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
......
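The reworked amdgpu_dpm_change_power_state_locked() above now delegates the "did anything actually change?" decision to a per-ASIC check_state_equal callback instead of the removed open-coded CRTC/state comparison; the real implementations live in collapsed hunks. A hypothetical callback illustrating only the contract (the name and the equality policy are placeholders):

/* Hypothetical sketch: set *equal when switching from cps to rps would be a
 * no-op, so the caller can skip amdgpu_dpm_set_power_state(). */
static int example_check_state_equal(struct amdgpu_device *adev,
				     struct amdgpu_ps *cps,
				     struct amdgpu_ps *rps,
				     bool *equal)
{
	if (!cps || !rps || !equal)
		return -EINVAL;

	/* placeholder policy: same state object and same VCE activity */
	*equal = (cps == rps) && (cps->vce_active == rps->vce_active);
	return 0;
}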
...@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle) ...@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle)
return ret; return ret;
} }
const struct amd_ip_funcs amdgpu_pp_ip_funcs = { static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.name = "amdgpu_powerplay", .name = "amdgpu_powerplay",
.early_init = amdgpu_pp_early_init, .early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init, .late_init = amdgpu_pp_late_init,
...@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = { ...@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.set_clockgating_state = amdgpu_pp_set_clockgating_state, .set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state, .set_powergating_state = amdgpu_pp_set_powergating_state,
}; };
const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &amdgpu_pp_ip_funcs,
};
...@@ -23,11 +23,11 @@ ...@@ -23,11 +23,11 @@
* *
*/ */
#ifndef __AMDGPU_POPWERPLAY_H__ #ifndef __AMDGPU_POWERPLAY_H__
#define __AMDGPU_POPWERPLAY_H__ #define __AMDGPU_POWERPLAY_H__
#include "amd_shared.h" #include "amd_shared.h"
extern const struct amd_ip_funcs amdgpu_pp_ip_funcs; extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
#endif /* __AMDSOC_DM_H__ */ #endif /* __AMDGPU_POWERPLAY_H__ */
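With amdgpu_pp_ip_funcs made static, the powerplay component is now exported as an amdgpu_ip_block_version; the ASIC setup code that consumes it sits in collapsed hunks. A sketch of the expected registration, assuming the amdgpu_ip_block_add() helper introduced elsewhere in this series:

/* Sketch only: how an ASIC init path would register the exported block. */
static int example_register_powerplay(struct amdgpu_device *adev)
{
	return amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
}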
...@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) ...@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{ {
/* Align requested size with padding so unlock_commit can /* Align requested size with padding so unlock_commit can
* pad safely */ * pad safely */
ndw = (ndw + ring->align_mask) & ~ring->align_mask; ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
/* Make sure we aren't trying to allocate more space /* Make sure we aren't trying to allocate more space
* than the maximum for one submission * than the maximum for one submission
...@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) ...@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
int i; int i;
for (i = 0; i < count; i++) for (i = 0; i < count; i++)
amdgpu_ring_write(ring, ring->nop); amdgpu_ring_write(ring, ring->funcs->nop);
} }
/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
...@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) ...@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/ */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{ {
while (ib->length_dw & ring->align_mask) while (ib->length_dw & ring->funcs->align_mask)
ib->ptr[ib->length_dw++] = ring->nop; ib->ptr[ib->length_dw++] = ring->funcs->nop;
} }
/** /**
...@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) ...@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
uint32_t count; uint32_t count;
/* We pad to match fetch size */ /* We pad to match fetch size */
count = ring->align_mask + 1 - (ring->wptr & ring->align_mask); count = ring->funcs->align_mask + 1 -
count %= ring->align_mask + 1; (ring->wptr & ring->funcs->align_mask);
count %= ring->funcs->align_mask + 1;
ring->funcs->insert_nop(ring, count); ring->funcs->insert_nop(ring, count);
mb(); mb();
...@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) ...@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Returns 0 on success, error on failure. * Returns 0 on success, error on failure.
*/ */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned max_dw, u32 nop, u32 align_mask, unsigned max_dw, struct amdgpu_irq_src *irq_src,
struct amdgpu_irq_src *irq_src, unsigned irq_type, unsigned irq_type)
enum amdgpu_ring_type ring_type)
{ {
int r; int r;
...@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ...@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->ring_size = roundup_pow_of_two(max_dw * 4 * ring->ring_size = roundup_pow_of_two(max_dw * 4 *
amdgpu_sched_hw_submission); amdgpu_sched_hw_submission);
ring->align_mask = align_mask;
ring->nop = nop;
ring->type = ring_type;
/* Allocate ring buffer */ /* Allocate ring buffer */
if (ring->ring_obj == NULL) { if (ring->ring_obj == NULL) {
......
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__
#include "gpu_scheduler.h"
/* max number of rings */
#define AMDGPU_MAX_RINGS 16
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
enum amdgpu_ring_type {
AMDGPU_RING_TYPE_GFX,
AMDGPU_RING_TYPE_COMPUTE,
AMDGPU_RING_TYPE_SDMA,
AMDGPU_RING_TYPE_UVD,
AMDGPU_RING_TYPE_VCE
};
struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
/*
* Fences.
*/
struct amdgpu_fence_driver {
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
struct timer_list fallback_timer;
unsigned num_fences_mask;
spinlock_t lock;
struct fence **fences;
};
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
* Rings.
*/
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
enum amdgpu_ring_type type;
uint32_t align_mask;
u32 nop;
/* ring read/write ptr handling */
u32 (*get_rptr)(struct amdgpu_ring *ring);
u32 (*get_wptr)(struct amdgpu_ring *ring);
void (*set_wptr)(struct amdgpu_ring *ring);
/* validating and patching of IBs */
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
/* constants to calculate how many DW are needed for an emit */
unsigned emit_frame_size;
unsigned emit_ib_size;
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring);
void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size);
/* testing functions */
int (*test_ring)(struct amdgpu_ring *ring);
int (*test_ib)(struct amdgpu_ring *ring, long timeout);
/* insert NOP packets */
void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
/* pad the indirect buffer to the necessary number of dw */
void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
/* note usage for clock and power gating */
void (*begin_use)(struct amdgpu_ring *ring);
void (*end_use)(struct amdgpu_ring *ring);
void (*emit_switch_buffer) (struct amdgpu_ring *ring);
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
};
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler sched;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr_offs;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
unsigned max_dw;
int count_dw;
uint64_t gpu_addr;
uint32_t ptr_mask;
bool ready;
u32 idx;
u32 me;
u32 pipe;
u32 queue;
struct amdgpu_bo *mqd_obj;
u32 doorbell_index;
bool use_doorbell;
unsigned wptr_offs;
unsigned fence_offs;
uint64_t current_ctx;
char name[16];
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
#endif
};
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
#endif
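Since type, align_mask and nop moved from struct amdgpu_ring into the constant struct amdgpu_ring_funcs, each engine now declares them in its funcs table and amdgpu_ring_init() drops the corresponding parameters (matching the amdgpu_ring.c hunk above). An illustrative fragment with placeholder values; the mandatory get_rptr/get_wptr/set_wptr and emit_* callbacks are omitted:

/* Illustrative only: placeholder type/align_mask/nop values, generic
 * insert_nop/pad_ib helpers from this header. */
static const struct amdgpu_ring_funcs example_ring_funcs = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,	/* placeholder: pad submissions to 256 dw */
	.nop = 0x00000000,	/* placeholder engine NOP packet */
	/* .get_rptr / .get_wptr / .set_wptr / .emit_* omitted */
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};

/* The new init call no longer passes nop/align_mask/type:
 *	r = amdgpu_ring_init(adev, ring, 1024, irq_src, irq_type);
 */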
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef __AMDGPU_SYNC_H__
#define __AMDGPU_SYNC_H__
#include <linux/hashtable.h>
struct fence;
struct reservation_object;
struct amdgpu_device;
struct amdgpu_ring;
/*
* Container for fences used to sync command submissions.
*/
struct amdgpu_sync {
DECLARE_HASHTABLE(fences, 4);
struct fence *last_vm_update;
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
#endif
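The header above only declares the fence container; a sketch of the typical lifecycle (error handling and scheduler integration trimmed; the fence returned by amdgpu_sync_get_fence() is assumed to carry a reference that the caller drops):

/* Sketch only: collect the fences attached to a reservation object and wait
 * for all of them before continuing. */
static int example_wait_resv_idle(struct amdgpu_device *adev,
				  struct reservation_object *resv,
				  void *owner)
{
	struct amdgpu_sync sync;
	struct fence *f;
	int r;

	amdgpu_sync_create(&sync);

	r = amdgpu_sync_resv(adev, &sync, resv, owner);
	if (r)
		goto out;

	while ((f = amdgpu_sync_get_fence(&sync)) != NULL) {
		r = fence_wait(f, false);
		fence_put(f);
		if (r)
			break;
	}
out:
	amdgpu_sync_free(&sync);
	return r;
}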
This diff is collapsed.
...@@ -66,6 +66,7 @@ struct amdgpu_mman { ...@@ -66,6 +66,7 @@ struct amdgpu_mman {
}; };
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo, struct ttm_buffer_object *tbo,
......
...@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode, ...@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
ucode->mc_addr = mc_addr; ucode->mc_addr = mc_addr;
ucode->kaddr = kptr; ucode->kaddr = kptr;
if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
return 0;
header = (const struct common_firmware_header *)ucode->fw->data; header = (const struct common_firmware_header *)ucode->fw->data;
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
le32_to_cpu(header->ucode_array_offset_bytes)), le32_to_cpu(header->ucode_array_offset_bytes)),
...@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode, ...@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
return 0; return 0;
} }
static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
uint64_t mc_addr, void *kptr)
{
const struct gfx_firmware_header_v1_0 *header = NULL;
const struct common_firmware_header *comm_hdr = NULL;
uint8_t* src_addr = NULL;
uint8_t* dst_addr = NULL;
if (NULL == ucode->fw)
return 0;
comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
dst_addr = ucode->kaddr +
ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
PAGE_SIZE);
src_addr = (uint8_t *)ucode->fw->data +
le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
(le32_to_cpu(header->jt_offset) * 4);
memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
return 0;
}
int amdgpu_ucode_init_bo(struct amdgpu_device *adev) int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{ {
struct amdgpu_bo **bo = &adev->firmware.fw_buf; struct amdgpu_bo **bo = &adev->firmware.fw_buf;
...@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) ...@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL; const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo); amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
0, NULL, NULL, bo);
if (err) { if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed; goto failed;
...@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) ...@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
goto failed_reserve; goto failed_reserve;
} }
err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr); err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&fw_mc_addr);
if (err) { if (err) {
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err); dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
goto failed_pin; goto failed_pin;
...@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) ...@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
header = (const struct common_firmware_header *)ucode->fw->data; header = (const struct common_firmware_header *)ucode->fw->data;
amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset, amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
fw_buf_ptr + fw_offset); fw_buf_ptr + fw_offset);
if (i == AMDGPU_UCODE_ID_CP_MEC1) {
const struct gfx_firmware_header_v1_0 *cp_hdr;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
fw_buf_ptr + fw_offset);
fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
}
fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
} }
} }
......
...@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID { ...@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC1, AMDGPU_UCODE_ID_CP_MEC1,
AMDGPU_UCODE_ID_CP_MEC2, AMDGPU_UCODE_ID_CP_MEC2,
AMDGPU_UCODE_ID_RLC_G, AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_MAXIMUM, AMDGPU_UCODE_ID_MAXIMUM,
}; };
......
...@@ -876,6 +876,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) ...@@ -876,6 +876,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_ib *ib = &parser->job->ibs[ib_idx]; struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
int r; int r;
parser->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
if (ib->length_dw % 16) { if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
ib->length_dw); ib->length_dw);
...@@ -931,7 +934,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ...@@ -931,7 +934,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r) if (r)
return r; return r;
if (!bo->adev->uvd.address_64_bit) { if (!ring->adev->uvd.address_64_bit) {
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo); amdgpu_uvd_force_into_uvd_segment(bo);
} }
...@@ -1002,7 +1005,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -1002,7 +1005,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo); NULL, NULL, &bo);
if (r) if (r)
return r; return r;
...@@ -1051,7 +1055,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -1051,7 +1055,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo); NULL, NULL, &bo);
if (r) if (r)
return r; return r;
......
...@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) ...@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vce.vcpu_bo); NULL, NULL, &adev->vce.vcpu_bo);
if (r) { if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
...@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) ...@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t *size = &tmp; uint32_t *size = &tmp;
int i, r, idx = 0; int i, r, idx = 0;
p->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
r = amdgpu_cs_sysvm_access_required(p); r = amdgpu_cs_sysvm_access_required(p);
if (r) if (r)
return r; return r;
...@@ -787,6 +791,96 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) ...@@ -787,6 +791,96 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
return r; return r;
} }
/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
*
*/
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
int session_idx = -1;
uint32_t destroyed = 0;
uint32_t created = 0;
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
int i, r = 0, idx = 0;
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len);
r = -EINVAL;
goto out;
}
switch (cmd) {
case 0x00000001: /* session */
handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
session_idx = amdgpu_vce_validate_handle(p, handle,
&allocated);
if (session_idx < 0) {
r = session_idx;
goto out;
}
break;
case 0x01000001: /* create */
created |= 1 << session_idx;
if (destroyed & (1 << session_idx)) {
destroyed &= ~(1 << session_idx);
allocated |= 1 << session_idx;
} else if (!(allocated & (1 << session_idx))) {
DRM_ERROR("Handle already in use!\n");
r = -EINVAL;
goto out;
}
break;
case 0x02000001: /* destroy */
destroyed |= 1 << session_idx;
break;
default:
break;
}
if (session_idx == -1) {
DRM_ERROR("no session command at start of IB\n");
r = -EINVAL;
goto out;
}
idx += len / 4;
}
if (allocated & ~created) {
DRM_ERROR("New session without create command!\n");
r = -ENOENT;
}
out:
if (!r) {
/* No error, free all destroyed handle slots */
tmp = destroyed;
amdgpu_ib_free(p->adev, ib, NULL);
} else {
/* Error during parsing, free all allocated handle slots */
tmp = allocated;
}
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
if (tmp & (1 << i))
atomic_set(&p->adev->vce.handles[i], 0);
return r;
}
/** /**
* amdgpu_vce_ring_emit_ib - execute indirect buffer * amdgpu_vce_ring_emit_ib - execute indirect buffer
* *
...@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, ...@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
amdgpu_ring_write(ring, VCE_CMD_END); amdgpu_ring_write(ring, VCE_CMD_END);
} }
unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
return
4; /* amdgpu_vce_ring_emit_ib */
}
unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
return
6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
}
/** /**
* amdgpu_vce_ring_test_ring - test if VCE ring is working * amdgpu_vce_ring_test_ring - test if VCE ring is working
* *
......
...@@ -34,6 +34,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -34,6 +34,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct fence **fence); bool direct, struct fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch); unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
......
This diff is collapsed.
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__
#include <linux/rbtree.h>
#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
/*
* GPUVM handling
*/
/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16
/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
/* LOG2 number of continuous pages for the fragment field */
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
#define AMDGPU_PTE_VALID (1 << 0)
#define AMDGPU_PTE_SYSTEM (1 << 1)
#define AMDGPU_PTE_SNOOPED (1 << 2)
/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1 << 4)
#define AMDGPU_PTE_READABLE (1 << 5)
#define AMDGPU_PTE_WRITEABLE (1 << 6)
#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
uint64_t addr;
};
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root va;
/* protecting invalidated */
spinlock_t status_lock;
/* BOs moved, but not yet updated in the PT */
struct list_head invalidated;
/* BOs cleared in the PT because of a move */
struct list_head cleared;
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */
struct amdgpu_bo *page_directory;
unsigned max_pde_used;
struct fence *page_directory_fence;
uint64_t last_eviction_counter;
/* array of page tables, one for each page directory entry */
struct amdgpu_vm_pt *page_tables;
/* for id and flush management per ring */
struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
/* protecting freed */
spinlock_t freed_lock;
/* Scheduler entity for page table updates */
struct amd_sched_entity entity;
/* client id */
u64 client_id;
};
struct amdgpu_vm_id {
struct list_head list;
struct fence *first;
struct amdgpu_sync active;
struct fence *last_flush;
atomic64_t owner;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
uint32_t current_gpu_reset_count;
uint32_t gds_base;
uint32_t gds_size;
uint32_t gws_base;
uint32_t gws_size;
uint32_t oa_base;
uint32_t oa_size;
};
struct amdgpu_vm_manager {
/* Handling of VMIDs */
struct mutex lock;
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
/* Handling of VM fences */
u64 fence_context;
unsigned seqno[AMDGPU_MAX_RINGS];
uint32_t max_pfn;
/* vram base address for page table entry */
u64 vram_base_offset;
/* is vm enabled? */
bool enabled;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rings;
atomic_t vm_pte_next_ring;
/* client id counter */
atomic64_t client_counter;
};
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
#endif
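As a worked example of the PTE bits defined above: a valid, snooped system page that is readable and writeable and mapped with the default fragment size (2^AMDGPU_LOG2_PAGES_PER_FRAG = 16 contiguous 4KB pages, i.e. 64KB) could carry flags like the following; the actual composition happens in the collapsed amdgpu_vm.c mapping code.

/* Worked example (illustrative): flags for a readable+writeable system page
 * using the default 64KB fragment size defined above. */
uint64_t example_pte_flags =
	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
	AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
	AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);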
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#include <drm/drmP.h>
#include "amdgpu.h"
struct amdgpu_vram_mgr {
struct drm_mm mm;
spinlock_t lock;
};
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
* @man: TTM memory type manager
* @p_size: maximum size of VRAM
*
* Allocate and initialize the VRAM manager.
*/
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
struct amdgpu_vram_mgr *mgr;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
return -ENOMEM;
drm_mm_init(&mgr->mm, 0, p_size);
spin_lock_init(&mgr->lock);
man->priv = mgr;
return 0;
}
/**
* amdgpu_vram_mgr_fini - free and destroy VRAM manager
*
* @man: TTM memory type manager
*
* Destroy and free the VRAM manager, returns -EBUSY if ranges are still
* allocated inside it.
*/
static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
if (!drm_mm_clean(&mgr->mm)) {
spin_unlock(&mgr->lock);
return -EBUSY;
}
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
return 0;
}
/**
* amdgpu_vram_mgr_new - allocate new ranges
*
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
* @mem: the resulting mem object
*
* Allocate VRAM for the given BO.
*/
static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
unsigned i;
int r;
lpfn = place->lpfn;
if (!lpfn)
lpfn = man->size;
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
amdgpu_vram_page_split == -1) {
pages_per_node = ~0ul;
num_nodes = 1;
} else {
pages_per_node = max((uint32_t)amdgpu_vram_page_split,
mem->page_alignment);
num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
}
nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
if (!nodes)
return -ENOMEM;
if (place->flags & TTM_PL_FLAG_TOPDOWN) {
sflags = DRM_MM_SEARCH_BELOW;
aflags = DRM_MM_CREATE_TOP;
}
pages_left = mem->num_pages;
spin_lock(&mgr->lock);
for (i = 0; i < num_nodes; ++i) {
unsigned long pages = min(pages_left, pages_per_node);
uint32_t alignment = mem->page_alignment;
if (pages == pages_per_node)
alignment = pages_per_node;
else
sflags |= DRM_MM_SEARCH_BEST;
r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
alignment, 0,
place->fpfn, lpfn,
sflags, aflags);
if (unlikely(r))
goto error;
pages_left -= pages;
}
spin_unlock(&mgr->lock);
mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
mem->mm_node = nodes;
return 0;
error:
while (i--)
drm_mm_remove_node(&nodes[i]);
spin_unlock(&mgr->lock);
kfree(nodes);
return r == -ENOSPC ? 0 : r;
}
/**
* amdgpu_vram_mgr_del - free ranges
*
* @man: TTM memory type manager
* @mem: TTM memory object
*
* Free the allocated VRAM again.
*/
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm_node *nodes = mem->mm_node;
unsigned pages = mem->num_pages;
if (!mem->mm_node)
return;
spin_lock(&mgr->lock);
while (pages) {
pages -= nodes->size;
drm_mm_remove_node(nodes);
++nodes;
}
spin_unlock(&mgr->lock);
kfree(mem->mm_node);
mem->mm_node = NULL;
}
/**
* amdgpu_vram_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
* @prefix: text prefix
*
* Dump the table content using printk.
*/
static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
drm_mm_debug_table(&mgr->mm, prefix);
spin_unlock(&mgr->lock);
}
const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
amdgpu_vram_mgr_init,
amdgpu_vram_mgr_fini,
amdgpu_vram_mgr_new,
amdgpu_vram_mgr_del,
amdgpu_vram_mgr_debug
};
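The manager above is wired into TTM by the collapsed amdgpu_ttm.c hunk, replacing TTM's generic range manager for the VRAM domain so allocations can be split across multiple drm_mm nodes; when a buffer ends up non-contiguous, mem->start becomes AMDGPU_BO_INVALID_OFFSET, which is why amdgpu_bo_gpu_offset() above now warns unless AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS is set. A sketch of the expected hook-up; the flag and caching values mirror the usual VRAM setup and are assumptions here:

/* Sketch only (the real change is in the collapsed amdgpu_ttm.c hunk):
 * select the driver-private VRAM manager for TTM_PL_VRAM. */
static void example_init_vram_mem_type(struct amdgpu_device *adev,
				       struct ttm_mem_type_manager *man)
{
	man->func = &amdgpu_vram_mgr_func;	/* was &ttm_bo_manager_func */
	man->gpu_offset = adev->mc.vram_start;
	man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
	man->default_caching = TTM_PL_FLAG_WC;
}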