Commit 56fb6f92 authored by Linus Torvalds

Merge tag 'drm-next-2024-05-25' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Some fixes for the end of the merge window, mostly amdgpu and panthor,
  with one nouveau uAPI change that fixes a bad decision we made a few
  months back.

  nouveau:
   - fix bo metadata uAPI for vm bind

  panthor:
   - Fixes for panthor's heap logical block.
   - Reset on unrecoverable fault
   - Fix VM references.
   - Reset fix.

  xlnx:
   - xlnx compile and doc fixes.

  amdgpu:
   - Handle vbios table integrated info v2.3

  amdkfd:
   - Handle duplicate BOs in reserve_bo_and_cond_vms
   - Handle memory limitations on small APUs

  dp/mst:
   - MST null deref fix.

  bridge:
   - Don't let next bridge create connector in adv7511 to make probe
     work"

* tag 'drm-next-2024-05-25' of https://gitlab.freedesktop.org/drm/kernel:
  drm/amdgpu/atomfirmware: add intergrated info v2.3 table
  drm/mst: Fix NULL pointer dereference at drm_dp_add_payload_part2
  drm/amdkfd: Let VRAM allocations go to GTT domain on small APUs
  drm/amdkfd: handle duplicate BOs in reserve_bo_and_cond_vms
  drm/bridge: adv7511: Attach next bridge without creating connector
  drm/buddy: Fix the warn on's during force merge
  drm/nouveau: use tile_mode and pte_kind for VM_BIND bo allocations
  drm/panthor: Call panthor_sched_post_reset() even if the reset failed
  drm/panthor: Reset the FW VM to NULL on unplug
  drm/panthor: Keep a ref to the VM at the panthor_kernel_bo level
  drm/panthor: Force an immediate reset on unrecoverable faults
  drm/panthor: Document drm_panthor_tiler_heap_destroy::handle validity constraints
  drm/panthor: Fix an off-by-one in the heap context retrieval logic
  drm/panthor: Relax the constraints on the tiler chunk size
  drm/panthor: Make sure the tiler initial/max chunks are consistent
  drm/panthor: Fix tiler OOM handling to allow incremental rendering
  drm: xlnx: zynqmp_dpsub: Fix compilation error
  drm: xlnx: zynqmp_dpsub: Fix few function comments
parents 0b32d436 32a0bb7e
@@ -455,6 +455,9 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
 else
 mem_info->local_mem_size_private =
 KFD_XCP_MEMORY_SIZE(adev, xcp->id);
+} else if (adev->flags & AMD_IS_APU) {
+mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
+mem_info->local_mem_size_private = 0;
 } else {
 mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
 mem_info->local_mem_size_private = adev->gmc.real_vram_size -
@@ -824,6 +827,8 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
 }
 do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
 return ALIGN_DOWN(tmp, PAGE_SIZE);
+} else if (adev->flags & AMD_IS_APU) {
+return (ttm_tt_pages_limit() << PAGE_SHIFT);
 } else {
 return adev->gmc.real_vram_size;
 }
......
@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 return -EINVAL;
 vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
-if (adev->gmc.is_app_apu) {
+if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
 system_mem_needed = size;
 ttm_mem_needed = size;
 }
@@ -232,7 +232,8 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 "adev reference can't be null when vram is used");
 if (adev && xcp_id >= 0) {
 adev->kfd.vram_used[xcp_id] += vram_needed;
-adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ?
+adev->kfd.vram_used_aligned[xcp_id] +=
+(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
 vram_needed :
 ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
 }
@@ -260,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
 if (adev) {
 adev->kfd.vram_used[xcp_id] -= size;
-if (adev->gmc.is_app_apu) {
+if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
 adev->kfd.vram_used_aligned[xcp_id] -= size;
 kfd_mem_limit.system_mem_used -= size;
 kfd_mem_limit.ttm_mem_used -= size;
@@ -889,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 * if peer device has large BAR. In contrast, access over xGMI is
 * allowed for both small and large BAR configurations of peer device
 */
-if ((adev != bo_adev && !adev->gmc.is_app_apu) &&
+if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
 ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -1188,7 +1189,8 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 int ret;
 ctx->sync = &mem->sync;
-drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+DRM_EXEC_IGNORE_DUPLICATES, 0);
 drm_exec_until_all_locked(&ctx->exec) {
 ctx->n_vms = 0;
 list_for_each_entry(entry, &mem->attachments, list) {
@@ -1656,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
 - atomic64_read(&adev->vram_pin_size)
 - reserved_for_pt;
-if (adev->gmc.is_app_apu) {
+if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
 system_mem_available = no_system_mem_limit ?
 kfd_mem_limit.max_system_mem_limit :
 kfd_mem_limit.max_system_mem_limit -
@@ -1704,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
-if (adev->gmc.is_app_apu) {
+if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
 domain = AMDGPU_GEM_DOMAIN_GTT;
 alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 alloc_flags = 0;
@@ -1951,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 if (size) {
 if (!is_imported &&
 (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
-(adev->gmc.is_app_apu &&
+((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
 mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
 *size = bo_size;
 else
@@ -2373,8 +2375,9 @@ static int import_obj_create(struct amdgpu_device *adev,
 (*mem)->dmabuf = dma_buf;
 (*mem)->bo = bo;
 (*mem)->va = va;
-(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ?
-AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
+!(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
 (*mem)->mapped_to_gpu_memory = 0;
 (*mem)->process_info = avm->process_info;
......
@@ -212,6 +212,7 @@ union igp_info {
 struct atom_integrated_system_info_v1_11 v11;
 struct atom_integrated_system_info_v1_12 v12;
 struct atom_integrated_system_info_v2_1 v21;
+struct atom_integrated_system_info_v2_3 v23;
 };
 union umc_info {
@@ -360,6 +361,20 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
 if (vram_type)
 *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
 break;
+case 3:
+mem_channel_number = igp_info->v23.umachannelnumber;
+if (!mem_channel_number)
+mem_channel_number = 1;
+mem_type = igp_info->v23.memorytype;
+if (mem_type == LpDdr5MemType)
+mem_channel_width = 32;
+else
+mem_channel_width = 64;
+if (vram_width)
+*vram_width = mem_channel_number * mem_channel_width;
+if (vram_type)
+*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+break;
 default:
 return -EINVAL;
 }
......
@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
 if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
 return -EINVAL;
-if (adev->gmc.is_app_apu)
+if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)
 return 0;
 pgmap = &kfddev->pgmap;
......
@@ -2619,7 +2619,8 @@ svm_range_best_restore_location(struct svm_range *prange,
 return -1;
 }
-if (node->adev->gmc.is_app_apu)
+if (node->adev->gmc.is_app_apu ||
+node->adev->flags & AMD_IS_APU)
 return 0;
 if (prange->preferred_loc == gpuid ||
@@ -3337,7 +3338,8 @@ svm_range_best_prefetch_location(struct svm_range *prange)
 goto out;
 }
-if (bo_node->adev->gmc.is_app_apu) {
+if (bo_node->adev->gmc.is_app_apu ||
+bo_node->adev->flags & AMD_IS_APU) {
 best_loc = 0;
 goto out;
 }
......
@@ -201,7 +201,8 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
 * is initialized to not 0 when page migration register device memory.
 */
 #define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
-(adev)->gmc.is_app_apu)
+(adev)->gmc.is_app_apu ||\
+((adev)->flags & AMD_IS_APU))
 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
......
@@ -363,7 +363,7 @@ void dm_helpers_dp_mst_send_payload_allocation(
 mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
 new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
-ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
+ret = drm_dp_add_payload_part2(mst_mgr, new_payload);
 if (ret) {
 amdgpu_dm_set_mst_status(&aconnector->mst_status,
......
@@ -1657,6 +1657,49 @@ struct atom_integrated_system_info_v2_2
 uint32_t reserved4[189];
 };
+struct uma_carveout_option {
+char optionName[29]; //max length of string is 28chars + '\0'. Current design is for "minimum", "Medium", "High". This makes entire struct size 64bits
+uint8_t memoryCarvedGb; //memory carved out with setting
+uint8_t memoryRemainingGb; //memory remaining on system
+union {
+struct _flags {
+uint8_t Auto : 1;
+uint8_t Custom : 1;
+uint8_t Reserved : 6;
+} flags;
+uint8_t all8;
+} uma_carveout_option_flags;
+};
+struct atom_integrated_system_info_v2_3 {
+struct atom_common_table_header table_header;
+uint32_t vbios_misc; // enum of atom_system_vbiosmisc_def
+uint32_t gpucapinfo; // enum of atom_system_gpucapinf_def
+uint32_t system_config;
+uint32_t cpucapinfo;
+uint16_t gpuclk_ss_percentage; // unit of 0.001%, 1000 mean 1%
+uint16_t gpuclk_ss_type;
+uint16_t dpphy_override; // bit vector, enum of atom_sysinfo_dpphy_override_def
+uint8_t memorytype; // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
+uint8_t umachannelnumber; // number of memory channels
+uint8_t htc_hyst_limit;
+uint8_t htc_tmp_limit;
+uint8_t reserved1; // dp_ss_control
+uint8_t gpu_package_id;
+struct edp_info_table edp1_info;
+struct edp_info_table edp2_info;
+uint32_t reserved2[8];
+struct atom_external_display_connection_info extdispconninfo;
+uint8_t UMACarveoutVersion;
+uint8_t UMACarveoutIndexMax;
+uint8_t UMACarveoutTypeDefault;
+uint8_t UMACarveoutIndexDefault;
+uint8_t UMACarveoutType; //Auto or Custom
+uint8_t UMACarveoutIndex;
+struct uma_carveout_option UMASizeControlOption[20];
+uint8_t reserved3[110];
+};
 // system_config
 enum atom_system_vbiosmisc_def{
 INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT = 0x01,
......
@@ -953,7 +953,8 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge,
 int ret = 0;
 if (adv->next_bridge) {
-ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge, flags);
+ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge,
+flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 if (ret)
 return ret;
 }
......
@@ -3421,7 +3421,6 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2);
 /**
 * drm_dp_add_payload_part2() - Execute payload update part 2
 * @mgr: Manager to use.
-* @state: The global atomic state
 * @payload: The payload to update
 *
 * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
@@ -3430,14 +3429,13 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2);
 * Returns: 0 on success, negative error code on failure.
 */
 int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
-struct drm_atomic_state *state,
 struct drm_dp_mst_atomic_payload *payload)
 {
 int ret = 0;
 /* Skip failed payloads */
 if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) {
-drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
 payload->port->connector->name);
 return -EIO;
 }
......
@@ -524,11 +524,11 @@ __alloc_range_bias(struct drm_buddy *mm,
 continue;
 }
+if (!fallback && block_incompatible(block, flags))
+continue;
 if (contains(start, end, block_start, block_end) &&
 order == drm_buddy_block_order(block)) {
-if (!fallback && block_incompatible(block, flags))
-continue;
 /*
 * Find the free block within the range.
 */
......
@@ -1241,7 +1241,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
 if (first_mst_stream)
 intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
-drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
+drm_dp_add_payload_part2(&intel_dp->mst_mgr,
 drm_atomic_get_mst_payload_state(mst_state, connector->port));
 if (DISPLAY_VER(dev_priv) >= 12)
......
@@ -915,7 +915,7 @@ nv50_msto_cleanup(struct drm_atomic_state *state,
 msto->disabled = false;
 drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload);
 } else if (msto->enabled) {
-drm_dp_add_payload_part2(mgr, state, new_payload);
+drm_dp_add_payload_part2(mgr, new_payload);
 msto->enabled = false;
 }
 }
......
@@ -272,6 +272,9 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
 break;
 }
+case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
+getparam->value = 1;
+break;
 default:
 NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
 return -EINVAL;
......
@@ -241,28 +241,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 }
 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
-if (!nouveau_cli_uvmm(cli) || internal) {
-/* for BO noVM allocs, don't assign kinds */
-if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
-nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
-if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-kfree(nvbo);
-return ERR_PTR(-EINVAL);
-}
-nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
-} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-nvbo->kind = (tile_flags & 0x00007f00) >> 8;
-nvbo->comp = (tile_flags & 0x00030000) >> 16;
-if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-kfree(nvbo);
-return ERR_PTR(-EINVAL);
-}
-} else {
-nvbo->zeta = (tile_flags & 0x00000007);
-}
-nvbo->mode = tile_mode;
+if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+kfree(nvbo);
+return ERR_PTR(-EINVAL);
+}
+nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+nvbo->comp = (tile_flags & 0x00030000) >> 16;
+if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+kfree(nvbo);
+return ERR_PTR(-EINVAL);
+}
+} else {
+nvbo->zeta = (tile_flags & 0x00000007);
+}
+nvbo->mode = tile_mode;
+if (!nouveau_cli_uvmm(cli) || internal) {
 /* Determine the desirable target GPU page size for the buffer. */
 for (i = 0; i < vmm->page_nr; i++) {
 /* Because we cannot currently allow VMM maps to fail
@@ -304,12 +304,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 }
 nvbo->page = vmm->page[pi].shift;
 } else {
-/* reject other tile flags when in VM mode. */
-if (tile_mode)
-return ERR_PTR(-EINVAL);
-if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
-return ERR_PTR(-EINVAL);
 /* Determine the desirable target GPU page size for the buffer. */
 for (i = 0; i < vmm->page_nr; i++) {
 /* Because we cannot currently allow VMM maps to fail
......
@@ -129,13 +129,8 @@ static void panthor_device_reset_work(struct work_struct *work)
 panthor_gpu_l2_power_on(ptdev);
 panthor_mmu_post_reset(ptdev);
 ret = panthor_fw_post_reset(ptdev);
-if (ret)
-goto out_dev_exit;
 atomic_set(&ptdev->reset.pending, 0);
-panthor_sched_post_reset(ptdev);
+panthor_sched_post_reset(ptdev, ret != 0);
-out_dev_exit:
 drm_dev_exit(cookie);
 if (ret) {
@@ -293,6 +288,7 @@ static const struct panthor_exception_info panthor_exception_infos[] = {
 PANTHOR_EXCEPTION(ACTIVE),
 PANTHOR_EXCEPTION(CS_RES_TERM),
 PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
+PANTHOR_EXCEPTION(CS_UNRECOVERABLE),
 PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
 PANTHOR_EXCEPTION(CS_BUS_FAULT),
 PANTHOR_EXCEPTION(CS_INSTR_INVALID),
......
@@ -216,6 +216,7 @@ enum drm_panthor_exception_type {
 DRM_PANTHOR_EXCEPTION_CS_RES_TERM = 0x0f,
 DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT = 0x3f,
 DRM_PANTHOR_EXCEPTION_CS_CONFIG_FAULT = 0x40,
+DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE = 0x41,
 DRM_PANTHOR_EXCEPTION_CS_ENDPOINT_FAULT = 0x44,
 DRM_PANTHOR_EXCEPTION_CS_BUS_FAULT = 0x48,
 DRM_PANTHOR_EXCEPTION_CS_INSTR_INVALID = 0x49,
......
@@ -453,7 +453,7 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
 ret = panthor_kernel_bo_vmap(mem);
 if (ret) {
-panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), mem);
+panthor_kernel_bo_destroy(mem);
 return ERR_PTR(ret);
 }
@@ -1134,7 +1134,7 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
 panthor_fw_stop(ptdev);
 list_for_each_entry(section, &ptdev->fw->sections, node)
-panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), section->mem);
+panthor_kernel_bo_destroy(section->mem);
 /* We intentionally don't call panthor_vm_idle() and let
 * panthor_mmu_unplug() release the AS we acquired with
@@ -1142,6 +1142,7 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
 * state to keep the active_refcnt balanced.
 */
 panthor_vm_put(ptdev->fw->vm);
+ptdev->fw->vm = NULL;
 panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
 }
......
@@ -26,18 +26,18 @@ static void panthor_gem_free_object(struct drm_gem_object *obj)
 /**
 * panthor_kernel_bo_destroy() - Destroy a kernel buffer object
-* @vm: The VM this BO was mapped to.
 * @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction
 * is skipped.
 */
-void panthor_kernel_bo_destroy(struct panthor_vm *vm,
-struct panthor_kernel_bo *bo)
+void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
 {
+struct panthor_vm *vm;
 int ret;
 if (IS_ERR_OR_NULL(bo))
 return;
+vm = bo->vm;
 panthor_kernel_bo_vunmap(bo);
 if (drm_WARN_ON(bo->obj->dev,
@@ -53,6 +53,7 @@ void panthor_kernel_bo_destroy(struct panthor_vm *vm,
 drm_gem_object_put(bo->obj);
 out_free_bo:
+panthor_vm_put(vm);
 kfree(bo);
 }
@@ -106,6 +107,7 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
 if (ret)
 goto err_free_va;
+kbo->vm = panthor_vm_get(vm);
 bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
 drm_gem_object_get(bo->exclusive_vm_root_gem);
 bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
......
@@ -61,6 +61,11 @@ struct panthor_kernel_bo {
 */
 struct drm_gem_object *obj;
+/**
+ * @vm: VM this private buffer is attached to.
+ */
+struct panthor_vm *vm;
 /**
 * @va_node: VA space allocated to this GEM.
 */
@@ -136,7 +141,6 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
 size_t size, u32 bo_flags, u32 vm_map_flags,
 u64 gpu_va);
-void panthor_kernel_bo_destroy(struct panthor_vm *vm,
-struct panthor_kernel_bo *bo);
+void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
 #endif /* __PANTHOR_GEM_H__ */
@@ -127,7 +127,7 @@ static void panthor_free_heap_chunk(struct panthor_vm *vm,
 heap->chunk_count--;
 mutex_unlock(&heap->lock);
-panthor_kernel_bo_destroy(vm, chunk->bo);
+panthor_kernel_bo_destroy(chunk->bo);
 kfree(chunk);
 }
@@ -183,7 +183,7 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
 return 0;
 err_destroy_bo:
-panthor_kernel_bo_destroy(vm, chunk->bo);
+panthor_kernel_bo_destroy(chunk->bo);
 err_free_chunk:
 kfree(chunk);
@@ -253,8 +253,8 @@ int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle)
 * @pool: Pool to instantiate the heap context from.
 * @initial_chunk_count: Number of chunk allocated at initialization time.
 * Must be at least 1.
-* @chunk_size: The size of each chunk. Must be a power of two between 256k
-* and 2M.
+* @chunk_size: The size of each chunk. Must be page-aligned and lie in the
+* [128k:8M] range.
 * @max_chunks: Maximum number of chunks that can be allocated.
 * @target_in_flight: Maximum number of in-flight render passes.
 * @heap_ctx_gpu_va: Pointer holding the GPU address of the allocated heap
@@ -281,8 +281,11 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
 if (initial_chunk_count == 0)
 return -EINVAL;
-if (hweight32(chunk_size) != 1 ||
-chunk_size < SZ_256K || chunk_size > SZ_2M)
+if (initial_chunk_count > max_chunks)
+return -EINVAL;
+if (!IS_ALIGNED(chunk_size, PAGE_SIZE) ||
+chunk_size < SZ_128K || chunk_size > SZ_8M)
 return -EINVAL;
 down_read(&pool->lock);
@@ -320,7 +323,8 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
 if (!pool->vm) {
 ret = -EINVAL;
 } else {
-ret = xa_alloc(&pool->xa, &id, heap, XA_LIMIT(1, MAX_HEAPS_PER_POOL), GFP_KERNEL);
+ret = xa_alloc(&pool->xa, &id, heap,
+XA_LIMIT(0, MAX_HEAPS_PER_POOL - 1), GFP_KERNEL);
 if (!ret) {
 void *gpu_ctx = panthor_get_heap_ctx(pool, id);
@@ -391,7 +395,7 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
 mutex_unlock(&heap->lock);
 if (removed) {
-panthor_kernel_bo_destroy(pool->vm, chunk->bo);
+panthor_kernel_bo_destroy(chunk->bo);
 kfree(chunk);
 ret = 0;
 } else {
@@ -410,6 +414,13 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
 * @renderpasses_in_flight: Number of render passes currently in-flight.
 * @pending_frag_count: Number of fragment jobs waiting for execution/completion.
 * @new_chunk_gpu_va: Pointer used to return the chunk VA.
+*
+* Return:
+* - 0 if a new heap was allocated
+* - -ENOMEM if the tiler context reached the maximum number of chunks
+*   or if too many render passes are in-flight
+*   or if the allocation failed
+* - -EINVAL if any of the arguments passed to panthor_heap_grow() is invalid
 */
 int panthor_heap_grow(struct panthor_heap_pool *pool,
 u64 heap_gpu_va,
@@ -439,10 +450,7 @@ int panthor_heap_grow(struct panthor_heap_pool *pool,
 * handler provided by the userspace driver, if any).
 */
 if (renderpasses_in_flight > heap->target_in_flight ||
-(pending_frag_count > 0 && heap->chunk_count >= heap->max_chunks)) {
-ret = -EBUSY;
-goto out_unlock;
-} else if (heap->chunk_count >= heap->max_chunks) {
+heap->chunk_count >= heap->max_chunks) {
 ret = -ENOMEM;
 goto out_unlock;
 }
@@ -536,7 +544,7 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
 pool->vm = vm;
 pool->ptdev = ptdev;
 init_rwsem(&pool->lock);
-xa_init_flags(&pool->xa, XA_FLAGS_ALLOC1);
+xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
 kref_init(&pool->refcount);
 pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
@@ -587,7 +595,7 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
 drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
 if (!IS_ERR_OR_NULL(pool->gpu_contexts))
-panthor_kernel_bo_destroy(pool->vm, pool->gpu_contexts);
+panthor_kernel_bo_destroy(pool->gpu_contexts);
 /* Reflects the fact the pool has been destroyed. */
 pool->vm = NULL;
......
@@ -826,8 +826,8 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
 panthor_queue_put_syncwait_obj(queue);
-panthor_kernel_bo_destroy(group->vm, queue->ringbuf);
-panthor_kernel_bo_destroy(panthor_fw_vm(group->ptdev), queue->iface.mem);
+panthor_kernel_bo_destroy(queue->ringbuf);
+panthor_kernel_bo_destroy(queue->iface.mem);
 kfree(queue);
 }
@@ -837,15 +837,14 @@ static void group_release_work(struct work_struct *work)
 struct panthor_group *group = container_of(work,
 struct panthor_group,
 release_work);
-struct panthor_device *ptdev = group->ptdev;
 u32 i;
 for (i = 0; i < group->queue_count; i++)
 group_free_queue(group, group->queues[i]);
-panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->suspend_buf);
-panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->protm_suspend_buf);
-panthor_kernel_bo_destroy(group->vm, group->syncobjs);
+panthor_kernel_bo_destroy(group->suspend_buf);
+panthor_kernel_bo_destroy(group->protm_suspend_buf);
+panthor_kernel_bo_destroy(group->syncobjs);
 panthor_vm_put(group->vm);
 kfree(group);
@@ -1281,7 +1280,16 @@ cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
 if (group)
 group->fatal_queues |= BIT(cs_id);
-sched_queue_delayed_work(sched, tick, 0);
+if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
+/* If this exception is unrecoverable, queue a reset, and make
+ * sure we stop scheduling groups until the reset has happened.
+ */
+panthor_device_schedule_reset(ptdev);
+cancel_delayed_work(&sched->tick_work);
+} else {
+sched_queue_delayed_work(sched, tick, 0);
+}
 drm_warn(&ptdev->base,
 "CSG slot %d CS slot: %d\n"
 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
@@ -1385,7 +1393,12 @@ static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
 pending_frag_count, &new_chunk_va);
 }
-if (ret && ret != -EBUSY) {
+/* If the heap context doesn't have memory for us, we want to let the
+ * FW try to reclaim memory by waiting for fragment jobs to land or by
+ * executing the tiler OOM exception handler, which is supposed to
+ * implement incremental rendering.
+ */
+if (ret && ret != -ENOMEM) {
 drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
 group->fatal_queues |= BIT(cs_id);
 sched_queue_delayed_work(sched, tick, 0);
@@ -2720,15 +2733,22 @@ void panthor_sched_pre_reset(struct panthor_device *ptdev)
 mutex_unlock(&sched->reset.lock);
 }
-void panthor_sched_post_reset(struct panthor_device *ptdev)
+void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
 {
 struct panthor_scheduler *sched = ptdev->scheduler;
 struct panthor_group *group, *group_tmp;
 mutex_lock(&sched->reset.lock);
-list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node)
+list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
+/* Consider all previously running group as terminated if the
+ * reset failed.
+ */
+if (reset_failed)
+group->state = PANTHOR_CS_GROUP_TERMINATED;
 panthor_group_start(group);
+}
 /* We're done resetting the GPU, clear the reset.in_progress bit so we can
 * kick the scheduler.
@@ -2736,9 +2756,11 @@ void panthor_sched_post_reset(struct panthor_device *ptdev)
 atomic_set(&sched->reset.in_progress, false);
 mutex_unlock(&sched->reset.lock);
-sched_queue_delayed_work(sched, tick, 0);
-sched_queue_work(sched, sync_upd);
+/* No need to queue a tick and update syncs if the reset failed. */
+if (!reset_failed) {
+sched_queue_delayed_work(sched, tick, 0);
+sched_queue_work(sched, sync_upd);
+}
 }
 static void group_sync_upd_work(struct work_struct *work)
......
@@ -40,7 +40,7 @@ void panthor_group_pool_destroy(struct panthor_file *pfile);
 int panthor_sched_init(struct panthor_device *ptdev);
 void panthor_sched_unplug(struct panthor_device *ptdev);
 void panthor_sched_pre_reset(struct panthor_device *ptdev);
-void panthor_sched_post_reset(struct panthor_device *ptdev);
+void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed);
 void panthor_sched_suspend(struct panthor_device *ptdev);
 void panthor_sched_resume(struct panthor_device *ptdev);
......
@@ -940,7 +940,7 @@ zynqmp_disp_layer_find_format(struct zynqmp_disp_layer *layer,
 * zynqmp_disp_layer_find_live_format - Find format information for given
 * media bus format
 * @layer: The layer
-* @drm_fmt: Media bus format to search
+* @media_bus_format: Media bus format to search
 *
 * Search display subsystem format information corresponding to the given media
 * bus format @media_bus_format for the @layer, and return a pointer to the
@@ -981,7 +981,7 @@ u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
 unsigned int i;
 u32 *formats;
-if (WARN_ON(!layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE)) {
+if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_NONLIVE)) {
 *num_formats = 0;
 return NULL;
 }
@@ -1117,7 +1117,7 @@ void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
 /**
 * zynqmp_disp_layer_set_live_format - Set the live video layer format
 * @layer: The layer
-* @info: The format info
+* @media_bus_format: Media bus format to set
 *
 * NOTE: This function should not be used to set format for non-live video
 * layer. Use zynqmp_disp_layer_set_format() instead.
......
@@ -871,7 +871,6 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
 struct drm_dp_mst_topology_state *mst_state,
 struct drm_dp_mst_atomic_payload *payload);
 int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
-struct drm_atomic_state *state,
 struct drm_dp_mst_atomic_payload *payload);
 void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
 struct drm_dp_mst_topology_state *mst_state,
......
@@ -68,6 +68,13 @@ extern "C" {
 */
 #define NOUVEAU_GETPARAM_VRAM_USED 19
+/*
+ * NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
+ *
+ * Query whether tile mode and PTE kind are accepted with VM allocs or not.
+ */
+#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
 struct drm_nouveau_getparam {
 __u64 param;
 __u64 value;
......
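For context, a userspace driver would normally probe this parameter before relying on tile_mode/PTE kind being honoured on VM_BIND BO allocations. A minimal sketch of such a query follows; it is not part of this series, and the open render-node fd, error handling and the exact header path (kernel uapi headers vs. libdrm copies) are assumptions:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>

/* Returns non-zero when the kernel accepts tile_mode/PTE kind on VM_BIND
 * BO allocations. Older kernels report -EINVAL for the unknown parameter,
 * which is treated as "not supported" here.
 */
static int nouveau_has_vma_tilemode(int fd)
{
        struct drm_nouveau_getparam gp;

        memset(&gp, 0, sizeof(gp));
        gp.param = NOUVEAU_GETPARAM_HAS_VMA_TILEMODE;

        if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp))
                return 0;

        return gp.value == 1;
}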
@@ -895,13 +895,21 @@ struct drm_panthor_tiler_heap_create {
 /** @vm_id: VM ID the tiler heap should be mapped to */
 __u32 vm_id;
-/** @initial_chunk_count: Initial number of chunks to allocate. */
+/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
 __u32 initial_chunk_count;
-/** @chunk_size: Chunk size. Must be a power of two at least 256KB large. */
+/**
+ * @chunk_size: Chunk size.
+ *
+ * Must be page-aligned and lie in the [128k:8M] range.
+ */
 __u32 chunk_size;
-/** @max_chunks: Maximum number of chunks that can be allocated. */
+/**
+ * @max_chunks: Maximum number of chunks that can be allocated.
+ *
+ * Must be at least @initial_chunk_count.
+ */
 __u32 max_chunks;
 /**
@@ -931,7 +939,11 @@ struct drm_panthor_tiler_heap_create {
 * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
 */
 struct drm_panthor_tiler_heap_destroy {
-/** @handle: Handle of the tiler heap to destroy */
+/**
+ * @handle: Handle of the tiler heap to destroy.
+ *
+ * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
+ */
 __u32 handle;
 /** @pad: Padding field, MBZ. */
......
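As a usage note, these are the constraints a userspace caller has to satisfy when creating a heap. A rough sketch of an ioctl invocation honouring the relaxed chunk-size rules follows; it is not part of this series, and the fd, the VM handle obtained from DRM_IOCTL_PANTHOR_VM_CREATE, the exact header path and all error handling are assumptions:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/panthor_drm.h>

/* Sketch: create a tiler heap with 128 KiB chunks, a size the previous uAPI
 * rejected (it required a power of two of at least 256 KiB).
 */
static int create_tiler_heap(int fd, uint32_t vm_id, uint32_t *handle)
{
        struct drm_panthor_tiler_heap_create req;

        memset(&req, 0, sizeof(req));
        req.vm_id = vm_id;
        req.initial_chunk_count = 1;    /* must be at least one */
        req.chunk_size = 128 * 1024;    /* page-aligned, within [128k:8M] */
        req.max_chunks = 16;            /* must be >= initial_chunk_count */
        req.target_in_flight = 1;

        if (ioctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &req))
                return -1;

        *handle = req.handle;
        return 0;
}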