Commit 5ebfb7a5 authored by Eric Bernstein, committed by Alex Deucher

drm/amd/display: Move DCC support functions into dchubbub

Added dchubbub.h header file for common enum/struct definitions.
Added new interface functions get_dcc_compression_cap,
dcc_support_swizzle, dcc_support_pixel_format.
Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c4b0faae
......@@ -476,8 +476,227 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
}
static bool hubbub1_dcc_support_swizzle(
	enum swizzle_mode_values swizzle,
	unsigned int bytes_per_element,
	enum segment_order *segment_order_horz,
	enum segment_order *segment_order_vert)
{
	/* Classify the swizzle mode first, then map the (mode class,
	 * bytes-per-element) pair to the segment ordering seen along the
	 * horizontal and vertical access directions. Returns false for
	 * combinations DCC does not support.
	 */
	bool is_standard = false;
	bool is_display = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		is_standard = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		is_display = true;
		break;
	default:
		break;
	}

	if (is_standard) {
		switch (bytes_per_element) {
		case 1:
			*segment_order_horz = segment_order__contiguous;
			*segment_order_vert = segment_order__na;
			return true;
		case 2:
		case 4:
			/* 2 and 4 bytes/element share the same ordering */
			*segment_order_horz = segment_order__non_contiguous;
			*segment_order_vert = segment_order__contiguous;
			return true;
		case 8:
			*segment_order_horz = segment_order__na;
			*segment_order_vert = segment_order__contiguous;
			return true;
		default:
			break;
		}
	} else if (is_display && bytes_per_element == 8) {
		/* display swizzle is only supported at 8 bytes/element */
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}
static bool hubbub1_dcc_support_pixel_format(
	enum surface_pixel_format format,
	unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	/* Report whether DCC supports @format; on success store the element
	 * size through @bytes_per_element.
	 */
	unsigned int bpe;

	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		bpe = 2;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		bpe = 4;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		bpe = 8;
		break;
	default:
		return false;
	}

	*bytes_per_element = bpe;
	return true;
}
static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML. might want to refactor DML to leverage from DML */
	/* DML : get_blk256_size */
	/* Dimensions (in elements) of a 256-byte block for the given element
	 * size. Outputs are left untouched for unsupported sizes; callers
	 * pre-initialize them to 0.
	 */
	switch (bytes_per_element) {
	case 1:
		*blk256_width = 16;
		*blk256_height = 16;
		break;
	case 2:
		*blk256_width = 16;
		*blk256_height = 8;
		break;
	case 4:
		*blk256_width = 8;
		*blk256_height = 8;
		break;
	case 8:
		*blk256_width = 8;
		*blk256_height = 4;
		break;
	default:
		break;
	}
}
static void hubbub1_det_request_size(
	unsigned int height,
	unsigned int width,
	unsigned int bpe,
	bool *req128_horz_wc,
	bool *req128_vert_wc)
{
	/* Decide per access direction whether requests must drop from full
	 * 256B to half 128B: that happens once two swaths no longer fit in
	 * the detile buffer.
	 */
	const unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
	unsigned int blk_h = 0;
	unsigned int blk_w = 0;
	unsigned int horz_swath_bytes;
	unsigned int vert_swath_bytes;

	hubbub1_get_blk256_size(&blk_w, &blk_h, bpe);

	horz_swath_bytes = height * blk_h * bpe;
	vert_swath_bytes = width * blk_w * bpe;

	/* true => half 128B request; false => full 256B request */
	*req128_horz_wc = 2 * horz_swath_bytes > detile_buf_size;
	*req128_vert_wc = 2 * vert_swath_bytes > detile_buf_size;
}
/* Fill *output with the DCC compression capabilities for the surface
 * described by *input. Returns false (output zeroed, capable == false)
 * when DCC cannot be used: DCC disabled via debug option, unsupported
 * pixel format or swizzle mode, or a required half-request mode that the
 * debug options forbid.
 */
static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dc *dc = hubbub->ctx->dc;
	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	/* start from "not capable"; early returns below rely on this */
	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe))
		return false;

	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	/* determine, per access direction, whether half (128B) requests
	 * would be needed
	 */
	hubbub1_det_request_size(input->surface_size.height, input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	/* pick the DCC block configuration from the request size and the
	 * segment ordering along the relevant scan direction
	 */
	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* reg128 is true for either horz and vert
			 * but segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	/* half-request modes may be administratively disabled
	 * (DCC_HALF_REQ_DISALBE is the enum's spelling as declared elsewhere)
	 */
	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	/* translate the chosen control mode into reported block sizes */
	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	}

	output->capable = true;
	output->const_color_support = false;

	return true;
}
/* hubbub ops table for DCN1.0.
 *
 * Fix: the pasted diff kept both the pre-commit initializer line
 * (".update_dchub = hubbub1_update_dchub" with no trailing comma) and the
 * post-commit one, yielding a syntax error / duplicate designated
 * initializer. Keep only the post-commit wiring.
 */
static const struct hubbub_funcs hubbub1_funcs = {
	.update_dchub = hubbub1_update_dchub,
	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
};
void hubbub1_construct(struct hubbub *hubbub,
......
......@@ -27,6 +27,7 @@
#define __DC_HUBBUB_DCN10_H__
#include "core_types.h"
#include "dchubbub.h"
#define HUBHUB_REG_LIST_DCN()\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
......@@ -173,12 +174,6 @@ struct dcn_hubbub_wm {
struct dcn_hubbub_wm_set sets[4];
};
/* NOTE(review): pre-move ops table removed by this commit; the extended
 * declaration now lives in dchubbub.h.
 */
struct hubbub_funcs {
	/* apply dchub_init_data to the hub (implementation not visible here) */
	void (*update_dchub)(
		struct hubbub *hubbub,
		struct dchub_init_data *dh_data);
};
struct hubbub {
const struct hubbub_funcs *funcs;
struct dc_context *ctx;
......
......@@ -937,235 +937,16 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
return idle_pipe;
}
/* DCC request-size configuration; names encode
 * max_uncompressed_blk_size / max_compressed_blk_size.
 */
enum dcc_control {
	dcc_control__256_256_xxx,	/* 256B uncompressed / 256B compressed */
	dcc_control__128_128_xxx,	/* 128B uncompressed / 128B compressed */
	/* 256B uncompressed / 64B compressed, independent 64B blocks;
	 * the most constraining mode
	 */
	dcc_control__256_64_64,
};
/* Ordering of data segments within a request, as reported by
 * dcc_support_swizzle() per access direction.
 */
enum segment_order {
	segment_order__na,		/* ordering not applicable */
	segment_order__contiguous,
	segment_order__non_contiguous,
};
static bool dcc_support_pixel_format(
	enum surface_pixel_format format,
	unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	/* True when DCC supports @format; element size returned through
	 * @bytes_per_element on success.
	 */
	if (format == SURFACE_PIXEL_FORMAT_GRPH_ARGB1555 ||
	    format == SURFACE_PIXEL_FORMAT_GRPH_RGB565) {
		*bytes_per_element = 2;
		return true;
	}

	if (format == SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 ||
	    format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 ||
	    format == SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010 ||
	    format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010) {
		*bytes_per_element = 4;
		return true;
	}

	if (format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ||
	    format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F ||
	    format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
		*bytes_per_element = 8;
		return true;
	}

	return false;
}
static bool dcc_support_swizzle(
	enum swizzle_mode_values swizzle,
	unsigned int bytes_per_element,
	enum segment_order *segment_order_horz,
	enum segment_order *segment_order_vert)
{
	/* Determine whether DCC supports the swizzle/element-size pair and,
	 * if so, the segment ordering for horizontal and vertical access.
	 */
	bool standard = false;
	bool display = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display = true;
		break;
	default:
		break;
	}

	if (standard) {
		switch (bytes_per_element) {
		case 1:
			*segment_order_horz = segment_order__contiguous;
			*segment_order_vert = segment_order__na;
			return true;
		case 2:
		case 4:
			/* 2 and 4 bytes/element behave identically */
			*segment_order_horz = segment_order__non_contiguous;
			*segment_order_vert = segment_order__contiguous;
			return true;
		case 8:
			*segment_order_horz = segment_order__na;
			*segment_order_vert = segment_order__contiguous;
			return true;
		default:
			break;
		}
	} else if (display && bytes_per_element == 8) {
		/* display swizzle only supported at 8 bytes/element */
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}
static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML. might want to refactor DML to leverage from DML */
	/* DML : get_blk256_size */
	/* Width/height (in elements) of a 256-byte block. Unsupported
	 * element sizes leave the outputs unchanged; callers initialize
	 * them to 0.
	 */
	switch (bytes_per_element) {
	case 1:
		*blk256_width = 16;
		*blk256_height = 16;
		break;
	case 2:
		*blk256_width = 16;
		*blk256_height = 8;
		break;
	case 4:
		*blk256_width = 8;
		*blk256_height = 8;
		break;
	case 8:
		*blk256_width = 8;
		*blk256_height = 4;
		break;
	default:
		break;
	}
}
static void det_request_size(
	unsigned int height,
	unsigned int width,
	unsigned int bpe,
	bool *req128_horz_wc,
	bool *req128_vert_wc)
{
	/* Per access direction, report whether half (128B) requests are
	 * required: they are once two swaths exceed the detile buffer.
	 */
	const unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
	unsigned int blk_h = 0;
	unsigned int blk_w = 0;
	unsigned int horz_swath_bytes;
	unsigned int vert_swath_bytes;

	get_blk256_size(&blk_w, &blk_h, bpe);

	horz_swath_bytes = height * blk_h * bpe;
	vert_swath_bytes = width * blk_w * bpe;

	/* true => half 128B request; false => full 256B request */
	*req128_horz_wc = 2 * horz_swath_bytes > detile_buf_size;
	*req128_vert_wc = 2 * vert_swath_bytes > detile_buf_size;
}
static bool get_dcc_compression_cap(const struct dc *dc,
static bool dcn10_get_dcc_compression_cap(const struct dc *dc,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output)
{
/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
enum dcc_control dcc_control;
unsigned int bpe;
enum segment_order segment_order_horz, segment_order_vert;
bool req128_horz_wc, req128_vert_wc;
memset(output, 0, sizeof(*output));
if (dc->debug.disable_dcc == DCC_DISABLE)
return false;
if (!dcc_support_pixel_format(input->format,
&bpe))
return false;
if (!dcc_support_swizzle(input->swizzle_mode, bpe,
&segment_order_horz, &segment_order_vert))
return false;
det_request_size(input->surface_size.height, input->surface_size.width,
bpe, &req128_horz_wc, &req128_vert_wc);
if (!req128_horz_wc && !req128_vert_wc) {
dcc_control = dcc_control__256_256_xxx;
} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
if (!req128_horz_wc)
dcc_control = dcc_control__256_256_xxx;
else if (segment_order_horz == segment_order__contiguous)
dcc_control = dcc_control__128_128_xxx;
else
dcc_control = dcc_control__256_64_64;
} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
if (!req128_vert_wc)
dcc_control = dcc_control__256_256_xxx;
else if (segment_order_vert == segment_order__contiguous)
dcc_control = dcc_control__128_128_xxx;
else
dcc_control = dcc_control__256_64_64;
} else {
if ((req128_horz_wc &&
segment_order_horz == segment_order__non_contiguous) ||
(req128_vert_wc &&
segment_order_vert == segment_order__non_contiguous))
/* access_dir not known, must use most constraining */
dcc_control = dcc_control__256_64_64;
else
/* reg128 is true for either horz and vert
* but segment_order is contiguous
*/
dcc_control = dcc_control__128_128_xxx;
}
if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
dcc_control != dcc_control__256_256_xxx)
return false;
switch (dcc_control) {
case dcc_control__256_256_xxx:
output->grph.rgb.max_uncompressed_blk_size = 256;
output->grph.rgb.max_compressed_blk_size = 256;
output->grph.rgb.independent_64b_blks = false;
break;
case dcc_control__128_128_xxx:
output->grph.rgb.max_uncompressed_blk_size = 128;
output->grph.rgb.max_compressed_blk_size = 128;
output->grph.rgb.independent_64b_blks = false;
break;
case dcc_control__256_64_64:
output->grph.rgb.max_uncompressed_blk_size = 256;
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
break;
}
output->capable = true;
output->const_color_support = false;
return true;
return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
dc->res_pool->hubbub,
input,
output);
}
static void dcn10_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
......@@ -1186,7 +967,7 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
}
/* Capability-query ops exposed by the DCN1.0 resource pool.
 *
 * Fix: the pasted diff kept both the pre-rename initializer
 * (".get_dcc_compression_cap = get_dcc_compression_cap", no comma) and
 * the post-rename one — a syntax error / duplicate initializer. Keep
 * only the renamed entry.
 */
static struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
static struct resource_funcs dcn10_res_pool_funcs = {
......
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_DCHUBBUB_H__
#define __DAL_DCHUBBUB_H__
/* DCC request-size configuration; names encode
 * max_uncompressed_blk_size / max_compressed_blk_size.
 */
enum dcc_control {
	dcc_control__256_256_xxx,	/* 256B uncompressed / 256B compressed */
	dcc_control__128_128_xxx,	/* 128B uncompressed / 128B compressed */
	/* 256B uncompressed / 64B compressed with independent 64B blocks;
	 * the most constraining mode
	 */
	dcc_control__256_64_64,
};
/* Ordering of data segments within a request, reported by the
 * dcc_support_swizzle op per access direction.
 */
enum segment_order {
	segment_order__na,		/* ordering not applicable */
	segment_order__contiguous,
	segment_order__non_contiguous,
};
/* Ops implemented by a hubbub block. DCC support queries live here so
 * the resource code can stay ASIC-agnostic.
 */
struct hubbub_funcs {
	/* apply dchub_init_data to the hub (implementation not visible here) */
	void (*update_dchub)(
		struct hubbub *hubbub,
		struct dchub_init_data *dh_data);

	/* Fill *output with DCC caps for the surface described by *input;
	 * returns false when DCC cannot be enabled for it.
	 */
	bool (*get_dcc_compression_cap)(struct hubbub *hubbub,
			const struct dc_dcc_surface_param *input,
			struct dc_surface_dcc_cap *output);

	/* True when DCC supports the swizzle/element-size pair; on success
	 * reports the segment ordering for each access direction.
	 */
	bool (*dcc_support_swizzle)(
			enum swizzle_mode_values swizzle,
			unsigned int bytes_per_element,
			enum segment_order *segment_order_horz,
			enum segment_order *segment_order_vert);

	/* True when DCC supports the pixel format; on success reports the
	 * element size in bytes.
	 */
	bool (*dcc_support_pixel_format)(
			enum surface_pixel_format format,
			unsigned int *bytes_per_element);
};
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment