Commit 0a86b0db authored by Dave Airlie

Merge tag 'drm-misc-fixes-2022-06-23' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

Multiple fixes in sun4i for suspend, DDC, and DMA setup; a rework of vc4
to properly split the driver between the two hardware generations it
supports, which had not been done properly before and was causing
multiple crashes; and a panel orientation quirk for the AYA NEO Next.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20220623064152.ubjmnpj7tdejdcw6@houat
parents 382cf35f 85016f66
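The vc4 half of this merge repeatedly applies a single pattern: tag the device
with a generation flag when it is bound, then make every VC4-only GEM/V3D entry
point refuse to run on BCM2711 (VC5) hardware. A minimal sketch of that pattern,
using the is_vc5/WARN_ON_ONCE() names from the diff below (the surrounding ioctl
and its body are illustrative, not code from this merge):

        struct vc4_dev {
                struct drm_device base;
                bool is_vc5;    /* set once at bind time from the DT compatible */
        };

        static int some_vc4_only_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv)
        {
                struct vc4_dev *vc4 = to_vc4_dev(dev);

                /* BCM2711 has no VC4-style V3D; warn once and reject this path. */
                if (WARN_ON_ONCE(vc4->is_vc5))
                        return -ENODEV;

                /* ... VC4-generation-only work ... */
                return 0;
        }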
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -152,6 +152,12 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
 		},
 		.driver_data = (void *)&lcd800x1280_rightside_up,
+	}, {	/* AYA NEO NEXT */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+		  DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
+		},
+		.driver_data = (void *)&lcd800x1280_rightside_up,
 	}, {	/* Chuwi HiBook (CWI514) */
 		.matches = {
 		  DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
...
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -7,6 +7,7 @@
  */

 #include <linux/component.h>
+#include <linux/dma-mapping.h>
 #include <linux/kfifo.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
@@ -73,7 +74,6 @@ static int sun4i_drv_bind(struct device *dev)
 		goto free_drm;
 	}

-	dev_set_drvdata(dev, drm);
 	drm->dev_private = drv;
 	INIT_LIST_HEAD(&drv->frontend_list);
 	INIT_LIST_HEAD(&drv->engine_list);
@@ -114,6 +114,8 @@ static int sun4i_drv_bind(struct device *dev)
 	drm_fbdev_generic_setup(drm, 32);

+	dev_set_drvdata(dev, drm);
+
 	return 0;

 finish_poll:
@@ -130,6 +132,7 @@ static void sun4i_drv_unbind(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);

+	dev_set_drvdata(dev, NULL);
 	drm_dev_unregister(drm);
 	drm_kms_helper_poll_fini(drm);
 	drm_atomic_helper_shutdown(drm);
@@ -367,6 +370,13 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 	INIT_KFIFO(list.fifo);

+	/*
+	 * DE2 and DE3 cores actually support 40-bit addresses, but the
+	 * driver does not.
+	 */
+	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
 	for (i = 0;; i++) {
 		struct device_node *pipeline = of_parse_phandle(np,
 								"allwinner,pipelines",
...
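A side note on the DMA hunk above: dma_set_mask_and_coherent() can fail if the
platform cannot honour the requested mask, so callers often check its return
value. A hedged sketch of that more defensive form (the helper and its error
handling are illustrative, not part of this commit):

        #include <linux/dma-mapping.h>

        static int example_setup_dma(struct device *dev)
        {
                int ret;

                /* Constrain streaming and coherent DMA to 32-bit addresses. */
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
                if (ret)
                        return dev_err_probe(dev, ret, "No suitable DMA mask\n");

                dma_set_max_seg_size(dev, UINT_MAX);
                return 0;
        }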
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -117,7 +117,7 @@ static bool sun4i_layer_format_mod_supported(struct drm_plane *plane,
 	struct sun4i_layer *layer = plane_to_sun4i_layer(plane);

 	if (IS_ERR_OR_NULL(layer->backend->frontend))
-		sun4i_backend_format_is_supported(format, modifier);
+		return sun4i_backend_format_is_supported(format, modifier);

 	return sun4i_backend_format_is_supported(format, modifier) ||
 	       sun4i_frontend_format_is_supported(format, modifier);
...
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -93,34 +93,10 @@ static u32 sun8i_dw_hdmi_find_possible_crtcs(struct drm_device *drm,
 	return crtcs;
 }

-static int sun8i_dw_hdmi_find_connector_pdev(struct device *dev,
-					     struct platform_device **pdev_out)
-{
-	struct platform_device *pdev;
-	struct device_node *remote;
-
-	remote = of_graph_get_remote_node(dev->of_node, 1, -1);
-	if (!remote)
-		return -ENODEV;
-
-	if (!of_device_is_compatible(remote, "hdmi-connector")) {
-		of_node_put(remote);
-		return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(remote);
-	of_node_put(remote);
-	if (!pdev)
-		return -ENODEV;
-
-	*pdev_out = pdev;
-	return 0;
-}
-
 static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
 			      void *data)
 {
-	struct platform_device *pdev = to_platform_device(dev), *connector_pdev;
+	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_hdmi_plat_data *plat_data;
 	struct drm_device *drm = data;
 	struct device_node *phy_node;
@@ -167,30 +143,16 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
 		return dev_err_probe(dev, PTR_ERR(hdmi->regulator),
 				     "Couldn't get regulator\n");

-	ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev);
-	if (!ret) {
-		hdmi->ddc_en = gpiod_get_optional(&connector_pdev->dev,
-						  "ddc-en", GPIOD_OUT_HIGH);
-		platform_device_put(connector_pdev);
-
-		if (IS_ERR(hdmi->ddc_en)) {
-			dev_err(dev, "Couldn't get ddc-en gpio\n");
-			return PTR_ERR(hdmi->ddc_en);
-		}
-	}
-
 	ret = regulator_enable(hdmi->regulator);
 	if (ret) {
 		dev_err(dev, "Failed to enable regulator\n");
-		goto err_unref_ddc_en;
+		return ret;
 	}

-	gpiod_set_value(hdmi->ddc_en, 1);
-
 	ret = reset_control_deassert(hdmi->rst_ctrl);
 	if (ret) {
 		dev_err(dev, "Could not deassert ctrl reset control\n");
-		goto err_disable_ddc_en;
+		goto err_disable_regulator;
 	}

 	ret = clk_prepare_enable(hdmi->clk_tmds);
@@ -245,12 +207,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
 	clk_disable_unprepare(hdmi->clk_tmds);
 err_assert_ctrl_reset:
 	reset_control_assert(hdmi->rst_ctrl);
-err_disable_ddc_en:
-	gpiod_set_value(hdmi->ddc_en, 0);
+err_disable_regulator:
 	regulator_disable(hdmi->regulator);
-err_unref_ddc_en:
-	if (hdmi->ddc_en)
-		gpiod_put(hdmi->ddc_en);

 	return ret;
 }
@@ -264,11 +222,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
 	sun8i_hdmi_phy_deinit(hdmi->phy);
 	clk_disable_unprepare(hdmi->clk_tmds);
 	reset_control_assert(hdmi->rst_ctrl);
-	gpiod_set_value(hdmi->ddc_en, 0);
 	regulator_disable(hdmi->regulator);
-
-	if (hdmi->ddc_en)
-		gpiod_put(hdmi->ddc_en);
 }

 static const struct component_ops sun8i_dw_hdmi_ops = {
...
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -9,7 +9,6 @@
 #include <drm/bridge/dw_hdmi.h>
 #include <drm/drm_encoder.h>
 #include <linux/clk.h>
-#include <linux/gpio/consumer.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
@@ -193,7 +192,6 @@ struct sun8i_dw_hdmi {
 	struct regulator *regulator;
 	const struct sun8i_dw_hdmi_quirks *quirks;
 	struct reset_control *rst_ctrl;
-	struct gpio_desc *ddc_en;
 };

 extern struct platform_driver sun8i_hdmi_phy_driver;
...
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -248,6 +248,9 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	mutex_lock(&vc4->purgeable.lock);
 	list_add_tail(&bo->size_head, &vc4->purgeable.list);
 	vc4->purgeable.num++;
@@ -259,6 +262,9 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	/* list_del_init() is used here because the caller might release
 	 * the purgeable lock in order to acquire the madv one and update the
 	 * madv status.
@@ -387,6 +393,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_bo *bo;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return ERR_PTR(-ENODEV);
+
 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (!bo)
 		return ERR_PTR(-ENOMEM);
@@ -413,6 +422,9 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
 	struct drm_gem_cma_object *cma_obj;
 	struct vc4_bo *bo;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return ERR_PTR(-ENODEV);
+
 	if (size == 0)
 		return ERR_PTR(-EINVAL);

@@ -471,19 +483,20 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
 	return bo;
 }

-int vc4_dumb_create(struct drm_file *file_priv,
+int vc4_bo_dumb_create(struct drm_file *file_priv,
 		       struct drm_device *dev,
 		       struct drm_mode_create_dumb *args)
 {
-	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_bo *bo = NULL;
 	int ret;

-	if (args->pitch < min_pitch)
-		args->pitch = min_pitch;
-
-	if (args->size < args->pitch * args->height)
-		args->size = args->pitch * args->height;
+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
+	ret = vc4_dumb_fixup_args(args);
+	if (ret)
+		return ret;

 	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
 	if (IS_ERR(bo))
@@ -601,8 +614,12 @@ static void vc4_bo_cache_time_work(struct work_struct *work)

 int vc4_bo_inc_usecnt(struct vc4_bo *bo)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 	int ret;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	/* Fast path: if the BO is already retained by someone, no need to
 	 * check the madv status.
 	 */
@@ -637,6 +654,11 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)

 void vc4_bo_dec_usecnt(struct vc4_bo *bo)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	/* Fast path: if the BO is still retained by someone, no need to test
 	 * the madv value.
 	 */
@@ -756,6 +778,9 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
 	struct vc4_bo *bo = NULL;
 	int ret;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	ret = vc4_grab_bin_bo(vc4, vc4file);
 	if (ret)
 		return ret;
@@ -779,9 +804,13 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
 int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_vc4_mmap_bo *args = data;
 	struct drm_gem_object *gem_obj;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!gem_obj) {
 		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
@@ -805,6 +834,9 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
 	struct vc4_bo *bo = NULL;
 	int ret;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	if (args->size == 0)
 		return -EINVAL;

@@ -875,11 +907,15 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
 int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_vc4_set_tiling *args = data;
 	struct drm_gem_object *gem_obj;
 	struct vc4_bo *bo;
 	bool t_format;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	if (args->flags != 0)
 		return -EINVAL;

@@ -918,10 +954,14 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
 int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_vc4_get_tiling *args = data;
 	struct drm_gem_object *gem_obj;
 	struct vc4_bo *bo;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	if (args->flags != 0 || args->modifier != 0)
 		return -EINVAL;

@@ -948,6 +988,9 @@ int vc4_bo_cache_init(struct drm_device *dev)
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int i;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	/* Create the initial set of BO labels that the kernel will
 	 * use. This lets us avoid a bunch of string reallocation in
 	 * the kernel's draw and BO allocation paths.
@@ -1007,6 +1050,9 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
 	struct drm_gem_object *gem_obj;
 	int ret = 0, label;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	if (!args->len)
 		return -EINVAL;
...
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -256,7 +256,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
 		 * Removing 1 from the FIFO full level however
 		 * seems to completely remove that issue.
 		 */
-		if (!vc4->hvs->hvs5)
+		if (!vc4->is_vc5)
 			return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;

 		return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
@@ -389,7 +389,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encoder
 	if (is_dsi)
 		CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);

-	if (vc4->hvs->hvs5)
+	if (vc4->is_vc5)
 		CRTC_WRITE(PV_MUX_CFG,
 			   VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
 					 PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
@@ -775,17 +775,18 @@ struct vc4_async_flip_state {
 	struct drm_framebuffer *old_fb;
 	struct drm_pending_vblank_event *event;

-	struct vc4_seqno_cb cb;
+	union {
+		struct dma_fence_cb fence;
+		struct vc4_seqno_cb seqno;
+	} cb;
 };

 /* Called when the V3D execution for the BO being flipped to is done, so that
  * we can actually update the plane's address to point to it.
  */
 static void
-vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
+vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
 {
-	struct vc4_async_flip_state *flip_state =
-		container_of(cb, struct vc4_async_flip_state, cb);
 	struct drm_crtc *crtc = flip_state->crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_plane *plane = crtc->primary;
@@ -802,59 +803,96 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
 	drm_crtc_vblank_put(crtc);
 	drm_framebuffer_put(flip_state->fb);

-	/* Decrement the BO usecnt in order to keep the inc/dec calls balanced
-	 * when the planes are updated through the async update path.
-	 * FIXME: we should move to generic async-page-flip when it's
-	 * available, so that we can get rid of this hand-made cleanup_fb()
-	 * logic.
-	 */
-	if (flip_state->old_fb) {
-		struct drm_gem_cma_object *cma_bo;
-		struct vc4_bo *bo;
-
-		cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
-		bo = to_vc4_bo(&cma_bo->base);
-		vc4_bo_dec_usecnt(bo);
+	if (flip_state->old_fb)
 		drm_framebuffer_put(flip_state->old_fb);
-	}

 	kfree(flip_state);
 }

-/* Implements async (non-vblank-synced) page flips.
- *
- * The page flip ioctl needs to return immediately, so we grab the
- * modeset semaphore on the pipe, and queue the address update for
- * when V3D is done with the BO being flipped to.
- */
-static int vc4_async_page_flip(struct drm_crtc *crtc,
-			       struct drm_framebuffer *fb,
-			       struct drm_pending_vblank_event *event,
-			       uint32_t flags)
+static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
+{
+	struct vc4_async_flip_state *flip_state =
+		container_of(cb, struct vc4_async_flip_state, cb.seqno);
+	struct vc4_bo *bo = NULL;
+
+	if (flip_state->old_fb) {
+		struct drm_gem_cma_object *cma_bo =
+			drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
+		bo = to_vc4_bo(&cma_bo->base);
+	}
+
+	vc4_async_page_flip_complete(flip_state);
+
+	/*
+	 * Decrement the BO usecnt in order to keep the inc/dec
+	 * calls balanced when the planes are updated through
+	 * the async update path.
+	 *
+	 * FIXME: we should move to generic async-page-flip when
+	 * it's available, so that we can get rid of this
+	 * hand-made cleanup_fb() logic.
+	 */
+	if (bo)
+		vc4_bo_dec_usecnt(bo);
+}
+
+static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
+					       struct dma_fence_cb *cb)
+{
+	struct vc4_async_flip_state *flip_state =
+		container_of(cb, struct vc4_async_flip_state, cb.fence);
+
+	vc4_async_page_flip_complete(flip_state);
+	dma_fence_put(fence);
+}
+
+static int vc4_async_set_fence_cb(struct drm_device *dev,
+				  struct vc4_async_flip_state *flip_state)
+{
+	struct drm_framebuffer *fb = flip_state->fb;
+	struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct dma_fence *fence;
+	int ret;
+
+	if (!vc4->is_vc5) {
+		struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+
+		return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
+					  vc4_async_page_flip_seqno_complete);
+	}
+
+	ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
+	if (ret)
+		return ret;
+
+	/* If there's no fence, complete the page flip immediately */
+	if (!fence) {
+		vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+		return 0;
+	}
+
+	/* If the fence has already been completed, complete the page flip */
+	if (dma_fence_add_callback(fence, &flip_state->cb.fence,
+				   vc4_async_page_flip_fence_complete))
+		vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+
+	return 0;
+}
+
+static int
+vc4_async_page_flip_common(struct drm_crtc *crtc,
+			   struct drm_framebuffer *fb,
+			   struct drm_pending_vblank_event *event,
+			   uint32_t flags)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_plane *plane = crtc->primary;
-	int ret = 0;
 	struct vc4_async_flip_state *flip_state;
-	struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
-	struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
-
-	/* Increment the BO usecnt here, so that we never end up with an
-	 * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
-	 * plane is later updated through the non-async path.
-	 * FIXME: we should move to generic async-page-flip when it's
-	 * available, so that we can get rid of this hand-made prepare_fb()
-	 * logic.
-	 */
-	ret = vc4_bo_inc_usecnt(bo);
-	if (ret)
-		return ret;

 	flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
-	if (!flip_state) {
-		vc4_bo_dec_usecnt(bo);
+	if (!flip_state)
 		return -ENOMEM;
-	}

 	drm_framebuffer_get(fb);
 	flip_state->fb = fb;
@@ -881,23 +919,79 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
 	 */
 	drm_atomic_set_fb_for_plane(plane->state, fb);

-	vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
-			   vc4_async_page_flip_complete);
+	vc4_async_set_fence_cb(dev, flip_state);

 	/* Driver takes ownership of state on successful async commit. */
 	return 0;
 }

+/* Implements async (non-vblank-synced) page flips.
+ *
+ * The page flip ioctl needs to return immediately, so we grab the
+ * modeset semaphore on the pipe, and queue the address update for
+ * when V3D is done with the BO being flipped to.
+ */
+static int vc4_async_page_flip(struct drm_crtc *crtc,
+			       struct drm_framebuffer *fb,
+			       struct drm_pending_vblank_event *event,
+			       uint32_t flags)
+{
+	struct drm_device *dev = crtc->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
+	struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+	int ret;
+
+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
+	/*
+	 * Increment the BO usecnt here, so that we never end up with an
+	 * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
+	 * plane is later updated through the non-async path.
+	 *
+	 * FIXME: we should move to generic async-page-flip when
+	 * it's available, so that we can get rid of this
+	 * hand-made prepare_fb() logic.
+	 */
+	ret = vc4_bo_inc_usecnt(bo);
+	if (ret)
+		return ret;
+
+	ret = vc4_async_page_flip_common(crtc, fb, event, flags);
+	if (ret) {
+		vc4_bo_dec_usecnt(bo);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int vc5_async_page_flip(struct drm_crtc *crtc,
+			       struct drm_framebuffer *fb,
+			       struct drm_pending_vblank_event *event,
+			       uint32_t flags)
+{
+	return vc4_async_page_flip_common(crtc, fb, event, flags);
+}
+
 int vc4_page_flip(struct drm_crtc *crtc,
 		  struct drm_framebuffer *fb,
 		  struct drm_pending_vblank_event *event,
 		  uint32_t flags,
 		  struct drm_modeset_acquire_ctx *ctx)
 {
-	if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
-		return vc4_async_page_flip(crtc, fb, event, flags);
-	else
+	if (flags & DRM_MODE_PAGE_FLIP_ASYNC) {
+		struct drm_device *dev = crtc->dev;
+		struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+		if (vc4->is_vc5)
+			return vc5_async_page_flip(crtc, fb, event, flags);
+		else
+			return vc4_async_page_flip(crtc, fb, event, flags);
+	} else {
 		return drm_atomic_helper_page_flip(crtc, fb, event, flags, ctx);
+	}
 }

 struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
@@ -1149,7 +1243,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
 				  crtc_funcs, NULL);
 	drm_crtc_helper_add(crtc, crtc_helper_funcs);

-	if (!vc4->hvs->hvs5) {
+	if (!vc4->is_vc5) {
 		drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
 		drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
...
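One subtlety in the new fence path above: dma_fence_add_callback() returns
-ENOENT when the fence has already signalled, and in that case the callback is
never invoked, so the caller must run the completion handler itself. That is
exactly what the final `if` in vc4_async_set_fence_cb() does. A reduced sketch
of the pattern (the function names here are illustrative, not from this merge):

        #include <linux/dma-fence.h>

        static void example_flip_done(struct dma_fence *fence,
                                      struct dma_fence_cb *cb)
        {
                /* ... complete the flip ... */
                dma_fence_put(fence);
        }

        static void example_wait_or_run(struct dma_fence *fence,
                                        struct dma_fence_cb *cb)
        {
                /* Non-zero return means the fence already signalled and the
                 * callback will never fire; invoke the handler directly.
                 */
                if (dma_fence_add_callback(fence, cb, example_flip_done))
                        example_flip_done(fence, cb);
        }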
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -63,6 +63,32 @@ void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index)
 	return map;
 }

+int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args)
+{
+	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+	if (args->pitch < min_pitch)
+		args->pitch = min_pitch;
+
+	if (args->size < args->pitch * args->height)
+		args->size = args->pitch * args->height;
+
+	return 0;
+}
+
+static int vc5_dumb_create(struct drm_file *file_priv,
+			   struct drm_device *dev,
+			   struct drm_mode_create_dumb *args)
+{
+	int ret;
+
+	ret = vc4_dumb_fixup_args(args);
+	if (ret)
+		return ret;
+
+	return drm_gem_cma_dumb_create_internal(file_priv, dev, args);
+}
+
 static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file_priv)
 {
@@ -73,6 +99,9 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
 	if (args->pad != 0)
 		return -EINVAL;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	if (!vc4->v3d)
 		return -ENODEV;

@@ -116,11 +145,16 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,

 static int vc4_open(struct drm_device *dev, struct drm_file *file)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_file *vc4file;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
 	if (!vc4file)
 		return -ENOMEM;
+	vc4file->dev = vc4;

 	vc4_perfmon_open_file(vc4file);
 	file->driver_priv = vc4file;
@@ -132,6 +166,9 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_file *vc4file = file->driver_priv;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	if (vc4file->bin_bo_used)
 		vc4_v3d_bin_bo_put(vc4);

@@ -160,7 +197,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(VC4_PERFMON_GET_VALUES, vc4_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
 };

-static struct drm_driver vc4_drm_driver = {
+static const struct drm_driver vc4_drm_driver = {
 	.driver_features = (DRIVER_MODESET |
 			    DRIVER_ATOMIC |
 			    DRIVER_GEM |
@@ -175,7 +212,7 @@ static struct drm_driver vc4_drm_driver = {

 	.gem_create_object = vc4_create_object,

-	DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create),
+	DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),

 	.ioctls = vc4_drm_ioctls,
 	.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
@@ -189,6 +226,27 @@ static struct drm_driver vc4_drm_driver = {
 	.patchlevel = DRIVER_PATCHLEVEL,
 };

+static const struct drm_driver vc5_drm_driver = {
+	.driver_features = (DRIVER_MODESET |
+			    DRIVER_ATOMIC |
+			    DRIVER_GEM),
+
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = vc4_debugfs_init,
+#endif
+
+	DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
+
+	.fops = &vc4_drm_fops,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
 static void vc4_match_add_drivers(struct device *dev,
 				  struct component_match **match,
 				  struct platform_driver *const *drivers,
@@ -212,42 +270,49 @@ static void vc4_match_add_drivers(struct device *dev,
 static int vc4_drm_bind(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
+	const struct drm_driver *driver;
 	struct rpi_firmware *firmware = NULL;
 	struct drm_device *drm;
 	struct vc4_dev *vc4;
 	struct device_node *node;
 	struct drm_crtc *crtc;
+	bool is_vc5;
 	int ret = 0;

 	dev->coherent_dma_mask = DMA_BIT_MASK(32);

-	/* If VC4 V3D is missing, don't advertise render nodes. */
-	node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
-	if (!node || !of_device_is_available(node))
-		vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
-	of_node_put(node);
+	is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5");
+	if (is_vc5)
+		driver = &vc5_drm_driver;
+	else
+		driver = &vc4_drm_driver;

-	vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
+	vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base);
 	if (IS_ERR(vc4))
 		return PTR_ERR(vc4);
+	vc4->is_vc5 = is_vc5;

 	drm = &vc4->base;
 	platform_set_drvdata(pdev, drm);
 	INIT_LIST_HEAD(&vc4->debugfs_list);

-	mutex_init(&vc4->bin_bo_lock);
+	if (!is_vc5) {
+		mutex_init(&vc4->bin_bo_lock);

-	ret = vc4_bo_cache_init(drm);
-	if (ret)
-		return ret;
+		ret = vc4_bo_cache_init(drm);
+		if (ret)
+			return ret;
+	}

 	ret = drmm_mode_config_init(drm);
 	if (ret)
 		return ret;

-	ret = vc4_gem_init(drm);
-	if (ret)
-		return ret;
+	if (!is_vc5) {
+		ret = vc4_gem_init(drm);
+		if (ret)
+			return ret;
+	}

 	node = of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
 	if (node) {
@@ -258,7 +323,7 @@ static int vc4_drm_bind(struct device *dev)
 		return -EPROBE_DEFER;
 	}

-	ret = drm_aperture_remove_framebuffers(false, &vc4_drm_driver);
+	ret = drm_aperture_remove_framebuffers(false, driver);
 	if (ret)
 		return ret;
...
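The vc4_dumb_fixup_args() helper factored out above centralizes the dumb-buffer
pitch/size normalization that vc4_bo_dumb_create() used to do inline. A worked
example of the arithmetic, with illustrative values (this snippet is not part of
the merge):

        struct drm_mode_create_dumb args = {
                .width = 800, .height = 1280, .bpp = 32,
                .pitch = 0, .size = 0,
        };

        vc4_dumb_fixup_args(&args);
        /* min_pitch = DIV_ROUND_UP(800 * 32, 8) = 3200, so:
         *   args.pitch == 3200 bytes
         *   args.size  == 3200 * 1280 == 4096000 bytes
         */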
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -48,6 +48,8 @@ enum vc4_kernel_bo_type {
  * done. This way, only events related to a specific job will be counted.
  */
 struct vc4_perfmon {
+	struct vc4_dev *dev;
+
 	/* Tracks the number of users of the perfmon, when this counter reaches
 	 * zero the perfmon is destroyed.
 	 */
@@ -74,6 +76,8 @@ struct vc4_perfmon {
 struct vc4_dev {
 	struct drm_device base;

+	bool is_vc5;
+
 	unsigned int irq;

 	struct vc4_hvs *hvs;
@@ -316,6 +320,7 @@ struct vc4_v3d {
 };

 struct vc4_hvs {
+	struct vc4_dev *vc4;
 	struct platform_device *pdev;
 	void __iomem *regs;
 	u32 __iomem *dlist;
@@ -333,9 +338,6 @@ struct vc4_hvs {
 	struct drm_mm_node mitchell_netravali_filter;

 	struct debugfs_regset32 regset;
-
-	/* HVS version 5 flag, therefore requires updated dlist structures */
-	bool hvs5;
 };

 struct vc4_plane {
@@ -580,6 +582,8 @@ to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
 #define VC4_REG32(reg) { .name = #reg, .offset = reg }

 struct vc4_exec_info {
+	struct vc4_dev *dev;
+
 	/* Sequence number for this bin/render job. */
 	uint64_t seqno;

@@ -701,6 +705,8 @@ struct vc4_exec_info {
  * released when the DRM file is closed should be placed here.
  */
 struct vc4_file {
+	struct vc4_dev *dev;
+
 	struct {
 		struct idr idr;
 		struct mutex lock;
@@ -814,7 +820,7 @@ struct vc4_validated_shader_info {
 struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
 struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
 			     bool from_cache, enum vc4_kernel_bo_type type);
-int vc4_dumb_create(struct drm_file *file_priv,
+int vc4_bo_dumb_create(struct drm_file *file_priv,
 		       struct drm_device *dev,
 		       struct drm_mode_create_dumb *args);
 int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
@@ -885,6 +891,7 @@ static inline void vc4_debugfs_add_regset32(struct drm_device *drm,

 /* vc4_drv.c */
 void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
+int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args);

 /* vc4_dpi.c */
 extern struct platform_driver vc4_dpi_driver;
...
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -76,6 +76,9 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
 	u32 i;
 	int ret = 0;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	if (!vc4->v3d) {
 		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
 		return -ENODEV;
@@ -386,6 +389,9 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
 	unsigned long timeout_expire;
 	DEFINE_WAIT(wait);

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	if (vc4->finished_seqno >= seqno)
 		return 0;

@@ -468,6 +474,9 @@ vc4_submit_next_bin_job(struct drm_device *dev)
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_exec_info *exec;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 again:
 	exec = vc4_first_bin_job(vc4);
 	if (!exec)
@@ -513,6 +522,9 @@ vc4_submit_next_render_job(struct drm_device *dev)
 	if (!exec)
 		return;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	/* A previous RCL may have written to one of our textures, and
 	 * our full cache flush at bin time may have occurred before
 	 * that RCL completed. Flush the texture cache now, but not
@@ -531,6 +543,9 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	bool was_empty = list_empty(&vc4->render_job_list);

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	list_move_tail(&exec->head, &vc4->render_job_list);
 	if (was_empty)
 		vc4_submit_next_render_job(dev);
@@ -997,6 +1012,9 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
 	unsigned long irqflags;
 	struct vc4_seqno_cb *cb, *cb_temp;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	spin_lock_irqsave(&vc4->job_lock, irqflags);
 	while (!list_empty(&vc4->job_done_list)) {
 		struct vc4_exec_info *exec =
@@ -1033,6 +1051,9 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	unsigned long irqflags;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	cb->func = func;
 	INIT_WORK(&cb->work, vc4_seqno_cb_work);

@@ -1083,8 +1104,12 @@ int
 vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_vc4_wait_seqno *args = data;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
 					       &args->timeout_ns);
 }
@@ -1093,11 +1118,15 @@ int
 vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int ret;
 	struct drm_vc4_wait_bo *args = data;
 	struct drm_gem_object *gem_obj;
 	struct vc4_bo *bo;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	if (args->pad != 0)
 		return -EINVAL;

@@ -1144,6 +1173,9 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 			   args->shader_rec_size,
 			   args->bo_handle_count);

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	if (!vc4->v3d) {
 		DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
 		return -ENODEV;
@@ -1167,6 +1199,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 		DRM_ERROR("malloc failure on exec struct\n");
 		return -ENOMEM;
 	}
+	exec->dev = vc4;

 	ret = vc4_v3d_pm_get(vc4);
 	if (ret) {
@@ -1276,6 +1309,9 @@ int vc4_gem_init(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	vc4->dma_fence_context = dma_fence_context_alloc(1);

 	INIT_LIST_HEAD(&vc4->bin_job_list);
@@ -1321,11 +1357,15 @@ static void vc4_gem_destroy(struct drm_device *dev, void *unused)
 int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_vc4_gem_madvise *args = data;
 	struct drm_gem_object *gem_obj;
 	struct vc4_bo *bo;
 	int ret;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	switch (args->madv) {
 	case VC4_MADV_DONTNEED:
 	case VC4_MADV_WILLNEED:
...
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1481,7 +1481,7 @@ vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode,
 				    unsigned int bpc,
 				    enum vc4_hdmi_output_format fmt)
 {
-	unsigned long long clock = mode->clock * 1000;
+	unsigned long long clock = mode->clock * 1000ULL;

 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		clock = clock * 2;
...
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -220,10 +220,11 @@ u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)

 int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
 {
+	struct vc4_dev *vc4 = hvs->vc4;
 	u32 reg;
 	int ret;

-	if (!hvs->hvs5)
+	if (!vc4->is_vc5)
 		return output;

 	switch (output) {
@@ -273,6 +274,7 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
 static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
 				struct drm_display_mode *mode, bool oneshot)
 {
+	struct vc4_dev *vc4 = hvs->vc4;
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 	struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
 	unsigned int chan = vc4_crtc_state->assigned_channel;
@@ -291,7 +293,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
 	 */
 	dispctrl = SCALER_DISPCTRLX_ENABLE;

-	if (!hvs->hvs5)
+	if (!vc4->is_vc5)
 		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
 					  SCALER_DISPCTRLX_WIDTH) |
 			    VC4_SET_FIELD(mode->vdisplay,
@@ -312,7 +314,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
 	HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
 		  SCALER_DISPBKGND_AUTOHS |
-		  ((!hvs->hvs5) ? SCALER_DISPBKGND_GAMMA : 0) |
+		  ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
 		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));

 	/* Reload the LUT, since the SRAMs would have been disabled if
@@ -617,11 +619,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
 	if (!hvs)
 		return -ENOMEM;
+	hvs->vc4 = vc4;
 	hvs->pdev = pdev;

-	if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm2711-hvs"))
-		hvs->hvs5 = true;
-
 	hvs->regs = vc4_ioremap_regs(pdev, 0);
 	if (IS_ERR(hvs->regs))
 		return PTR_ERR(hvs->regs);
@@ -630,7 +630,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
 	hvs->regset.regs = hvs_regs;
 	hvs->regset.nregs = ARRAY_SIZE(hvs_regs);

-	if (hvs->hvs5) {
+	if (vc4->is_vc5) {
 		hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
 		if (IS_ERR(hvs->core_clk)) {
 			dev_err(&pdev->dev, "Couldn't get core clock\n");
@@ -644,7 +644,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
 		}
 	}

-	if (!hvs->hvs5)
+	if (!vc4->is_vc5)
 		hvs->dlist = hvs->regs + SCALER_DLIST_START;
 	else
 		hvs->dlist = hvs->regs + SCALER5_DLIST_START;
@@ -665,7 +665,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
 	 * between planes when they don't overlap on the screen, but
 	 * for now we just allocate globally.
 	 */
-	if (!hvs->hvs5)
+	if (!vc4->is_vc5)
 		/* 48k words of 2x12-bit pixels */
 		drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
 	else
...
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -265,6 +265,9 @@ vc4_irq_enable(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	if (!vc4->v3d)
 		return;

@@ -279,6 +282,9 @@ vc4_irq_disable(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	if (!vc4->v3d)
 		return;

@@ -296,8 +302,12 @@ vc4_irq_disable(struct drm_device *dev)

 int vc4_irq_install(struct drm_device *dev, int irq)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int ret;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return -ENODEV;
+
 	if (irq == IRQ_NOTCONNECTED)
 		return -ENOTCONN;

@@ -316,6 +326,9 @@ void vc4_irq_uninstall(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	vc4_irq_disable(dev);
 	free_irq(vc4->irq, dev);
 }
@@ -326,6 +339,9 @@ void vc4_irq_reset(struct drm_device *dev)
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	unsigned long irqflags;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return;
+
 	/* Acknowledge any stale IRQs. */
 	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
...
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -393,7 +393,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
 		old_hvs_state->fifo_state[channel].pending_commit = NULL;
 	}

-	if (vc4->hvs->hvs5) {
+	if (vc4->is_vc5) {
 		unsigned long state_rate = max(old_hvs_state->core_clock_rate,
 					       new_hvs_state->core_clock_rate);
 		unsigned long core_rate = max_t(unsigned long,
@@ -412,7 +412,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
 	vc4_ctm_commit(vc4, state);

-	if (vc4->hvs->hvs5)
+	if (vc4->is_vc5)
 		vc5_hvs_pv_muxing_commit(vc4, state);
 	else
 		vc4_hvs_pv_muxing_commit(vc4, state);
@@ -430,7 +430,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)

 	drm_atomic_helper_cleanup_planes(dev, state);

-	if (vc4->hvs->hvs5) {
+	if (vc4->is_vc5) {
 		drm_dbg(dev, "Running the core clock at %lu Hz\n",
 			new_hvs_state->core_clock_rate);

@@ -479,8 +479,12 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
 					     struct drm_file *file_priv,
 					     const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_mode_fb_cmd2 mode_cmd_local;

+	if (WARN_ON_ONCE(vc4->is_vc5))
+		return ERR_PTR(-ENODEV);
+
 	/* If the user didn't specify a modifier, use the
 	 * vc4_set_tiling_ioctl() state for the BO.
 	 */
@@ -997,11 +1001,15 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = {
 	.fb_create = vc4_fb_create,
 };

+static const struct drm_mode_config_funcs vc5_mode_funcs = {
+	.atomic_check = vc4_atomic_check,
+	.atomic_commit = drm_atomic_helper_commit,
+	.fb_create = drm_gem_fb_create,
+};
+
 int vc4_kms_load(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
-					      "brcm,bcm2711-vc5");
 	int ret;

 	/*
@@ -1009,7 +1017,7 @@ int vc4_kms_load(struct drm_device *dev)
 	 * the BCM2711, but the load tracker computations are used for
 	 * the core clock rate calculation.
 	 */
-	if (!is_vc5) {
+	if (!vc4->is_vc5) {
 		/* Start with the load tracker enabled. Can be
 		 * disabled through the debugfs load_tracker file.
 		 */
@@ -1025,7 +1033,7 @@ int vc4_kms_load(struct drm_device *dev)
 		return ret;
 	}

-	if (is_vc5) {
+	if (vc4->is_vc5) {
 		dev->mode_config.max_width = 7680;
 		dev->mode_config.max_height = 7680;
 	} else {
@@ -1033,7 +1041,7 @@ int vc4_kms_load(struct drm_device *dev)
 		dev->mode_config.max_height = 2048;
 	}

-	dev->mode_config.funcs = &vc4_mode_funcs;
+	dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
 	dev->mode_config.helper_private = &vc4_mode_config_helpers;
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.async_page_flip = true;
...
...@@ -17,13 +17,27 @@ ...@@ -17,13 +17,27 @@
void vc4_perfmon_get(struct vc4_perfmon *perfmon) void vc4_perfmon_get(struct vc4_perfmon *perfmon)
{ {
struct vc4_dev *vc4 = perfmon->dev;
if (WARN_ON_ONCE(vc4->is_vc5))
return;
if (perfmon) if (perfmon)
refcount_inc(&perfmon->refcnt); refcount_inc(&perfmon->refcnt);
} }
void vc4_perfmon_put(struct vc4_perfmon *perfmon) void vc4_perfmon_put(struct vc4_perfmon *perfmon)
{ {
if (perfmon && refcount_dec_and_test(&perfmon->refcnt)) struct vc4_dev *vc4;
if (!perfmon)
return;
vc4 = perfmon->dev;
if (WARN_ON_ONCE(vc4->is_vc5))
return;
if (refcount_dec_and_test(&perfmon->refcnt))
kfree(perfmon); kfree(perfmon);
} }
@@ -32,6 +46,9 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon)
	unsigned int i;
	u32 mask;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
		return;
@@ -49,6 +66,9 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
{
	unsigned int i;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	if (WARN_ON_ONCE(!vc4->active_perfmon ||
			 perfmon != vc4->active_perfmon))
		return;
@@ -64,8 +84,12 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
{
	struct vc4_dev *vc4 = vc4file->dev;
	struct vc4_perfmon *perfmon;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return NULL;

	mutex_lock(&vc4file->perfmon.lock);
	perfmon = idr_find(&vc4file->perfmon.idr, id);
	vc4_perfmon_get(perfmon);
@@ -76,8 +100,14 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
void vc4_perfmon_open_file(struct vc4_file *vc4file)
{
	struct vc4_dev *vc4 = vc4file->dev;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_init(&vc4file->perfmon.lock);
	idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN);
	vc4file->dev = vc4;
}
static int vc4_perfmon_idr_del(int id, void *elem, void *data)
@@ -91,6 +121,11 @@ static int vc4_perfmon_idr_del(int id, void *elem, void *data)
void vc4_perfmon_close_file(struct vc4_file *vc4file)
{
	struct vc4_dev *vc4 = vc4file->dev;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_lock(&vc4file->perfmon.lock);
	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
	idr_destroy(&vc4file->perfmon.idr);
@@ -107,6 +142,9 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
	unsigned int i;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
		return -ENODEV;
@@ -127,6 +165,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			  GFP_KERNEL);
	if (!perfmon)
		return -ENOMEM;
	perfmon->dev = vc4;

	for (i = 0; i < req->ncounters; i++)
		perfmon->events[i] = req->events[i];
@@ -157,6 +196,9 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
	struct drm_vc4_perfmon_destroy *req = data;
	struct vc4_perfmon *perfmon;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
		return -ENODEV;
@@ -182,6 +224,9 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
	struct vc4_perfmon *perfmon;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
		return -ENODEV;
......
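vc4_perfmon_put() above validates the pointer before reading perfmon->dev through it, and only frees the object on the final reference drop. A self-contained userspace model of that ordering (not kernel code; the names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
};

static void obj_put(struct obj *o)
{
	if (!o)		/* check the pointer before any dereference */
		return;

	/* fetch_sub returns the old value: 1 means this was the last ref */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}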
@@ -489,10 +489,10 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
	}

	/* Align it to 64 or 128 (hvs5) bytes */
	lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
	lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);

	/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
	lbm /= vc4->hvs->hvs5 ? 4 : 2;
	lbm /= vc4->is_vc5 ? 4 : 2;

	return lbm;
}

@@ -608,7 +608,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
	ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
					 &vc4_state->lbm,
					 lbm_size,
					 vc4->hvs->hvs5 ? 64 : 32,
					 vc4->is_vc5 ? 64 : 32,
					 0, 0);

	spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
@@ -917,7 +917,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
	mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
			  fb->format->has_alpha;

	if (!vc4->hvs->hvs5) {
	if (!vc4->is_vc5) {
		/* Control word */
		vc4_dlist_write(vc4_state,
				SCALER_CTL0_VALID |
@@ -1321,6 +1321,10 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane,
	old_vc4_state = to_vc4_plane_state(plane->state);
	new_vc4_state = to_vc4_plane_state(new_plane_state);

	if (!new_vc4_state->hw_dlist)
		return -EINVAL;

	if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
	    old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
	    old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
@@ -1385,6 +1389,13 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
	.atomic_async_update = vc4_plane_atomic_async_update,
};

static const struct drm_plane_helper_funcs vc5_plane_helper_funcs = {
	.atomic_check = vc4_plane_atomic_check,
	.atomic_update = vc4_plane_atomic_update,
	.atomic_async_check = vc4_plane_atomic_async_check,
	.atomic_async_update = vc4_plane_atomic_async_update,
};

static bool vc4_format_mod_supported(struct drm_plane *plane,
				     uint32_t format,
				     uint64_t modifier)
@@ -1453,14 +1464,13 @@ static const struct drm_plane_funcs vc4_plane_funcs = {
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_plane *plane = NULL;
	struct vc4_plane *vc4_plane;
	u32 formats[ARRAY_SIZE(hvs_formats)];
	int num_formats = 0;
	int ret = 0;
	unsigned i;
	bool hvs5 = of_device_is_compatible(dev->dev->of_node,
					    "brcm,bcm2711-vc5");
	static const uint64_t modifiers[] = {
		DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
		DRM_FORMAT_MOD_BROADCOM_SAND128,
@@ -1476,7 +1486,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
		if (!hvs_formats[i].hvs5_only || hvs5) {
		if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
			formats[num_formats] = hvs_formats[i].drm;
			num_formats++;
		}
@@ -1490,6 +1500,9 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
	if (ret)
		return ERR_PTR(ret);

	if (vc4->is_vc5)
		drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
	else
		drm_plane_helper_add(plane, &vc4_plane_helper_funcs);

	drm_plane_create_alpha_property(plane);
......
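vc4_plane_init() now picks between two helper-func tables instead of branching on the generation inside each callback; the tables are identical today, so the split mainly gives VC5 a place to drop or replace VC4-only hooks later. Separately, the hw_dlist check added to vc4_plane_atomic_async_check() refuses an asynchronous update until a synchronous commit has programmed a hardware display list to patch. The vtable selection can equally be written as a single expression; a sketch using the names from the diff, wrapped in a hypothetical helper:

/* Hypothetical helper: resolve the plane vtable once at init time
 * rather than testing vc4->is_vc5 inside every plane callback.
 */
static void vc4_example_add_plane_helpers(struct vc4_dev *vc4,
					  struct drm_plane *plane)
{
	const struct drm_plane_helper_funcs *helper_funcs =
		vc4->is_vc5 ? &vc5_plane_helper_funcs : &vc4_plane_helper_funcs;

	drm_plane_helper_add(plane, helper_funcs);
}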
@@ -593,11 +593,15 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_rcl_setup setup = {0};
	struct drm_vc4_submit_cl *args = exec->args;
	bool has_bin = args->bin_cl_size != 0;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (args->min_x_tile > args->max_x_tile ||
	    args->min_y_tile > args->max_y_tile) {
		DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
......
@@ -127,6 +127,9 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
int
vc4_v3d_pm_get(struct vc4_dev *vc4)
{
	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0) {
		int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
@@ -145,6 +148,9 @@ vc4_v3d_pm_get(struct vc4_dev *vc4)
void
vc4_v3d_pm_put(struct vc4_dev *vc4)
{
	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
@@ -172,6 +178,9 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
	uint64_t seqno = 0;
	struct vc4_exec_info *exec;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

try_again:
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	slot = ffs(~vc4->bin_alloc_used);
@@ -316,6 +325,9 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
{
	int ret = 0;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	mutex_lock(&vc4->bin_bo_lock);

	if (used && *used)
@@ -348,6 +360,9 @@ static void bin_bo_release(struct kref *ref)
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
{
	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_lock(&vc4->bin_bo_lock);
	kref_put(&vc4->bin_bo_kref, bin_bo_release);
	mutex_unlock(&vc4->bin_bo_lock);
......
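The render-CL, V3D, and validation hunks all repeat a single guard: entry points that drive the VC4-era GL pipeline must never run on the BCM2711, where the 3D core is handled by the separate v3d driver instead. A sketch of the pattern, with a hypothetical function name:

/* Hypothetical example of the guard fronting every V3D-era entry point:
 * warn loudly (once) if it is ever reached on VC5, then fail gracefully
 * instead of touching registers that are not there.
 */
static int vc4_example_legacy_entry(struct vc4_dev *vc4)
{
	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	/* ... VC4-only MMIO accesses and job setup would follow ... */
	return 0;
}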
@@ -105,9 +105,13 @@ size_is_lt(uint32_t width, uint32_t height, int cpp)
struct drm_gem_cma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
	struct vc4_dev *vc4 = exec->dev;
	struct drm_gem_cma_object *obj;
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return NULL;

	if (hindex >= exec->bo_count) {
		DRM_DEBUG("BO index %d greater than BO count %d\n",
			  hindex, exec->bo_count);
@@ -160,10 +164,14 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
		   uint32_t offset, uint8_t tiling_format,
		   uint32_t width, uint32_t height, uint8_t cpp)
{
	struct vc4_dev *vc4 = exec->dev;
	uint32_t aligned_width, aligned_height, stride, size;
	uint32_t utile_w = utile_width(cpp);
	uint32_t utile_h = utile_height(cpp);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return false;

	/* The shaded vertex format stores signed 12.4 fixed point
	 * (-2048,2047) offsets from the viewport center, so we should
	 * never have a render target larger than 4096. The texture
@@ -482,10 +490,14 @@ vc4_validate_bin_cl(struct drm_device *dev,
		    void *unvalidated,
		    struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t len = exec->args->bin_cl_size;
	uint32_t dst_offset = 0;
	uint32_t src_offset = 0;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	while (src_offset < len) {
		void *dst_pkt = validated + dst_offset;
		void *src_pkt = unvalidated + src_offset;
@@ -926,9 +938,13 @@ int
vc4_validate_shader_recs(struct drm_device *dev,
			 struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t i;
	int ret = 0;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	for (i = 0; i < exec->shader_state_count; i++) {
		ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
		if (ret)
......
@@ -778,6 +778,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
	struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
	bool found_shader_end = false;
	int shader_end_ip = 0;
	uint32_t last_thread_switch_ip = -3;
@@ -785,6 +786,9 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
	struct vc4_validated_shader_info *validated_shader = NULL;
	struct vc4_shader_validation_state validation_state;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return NULL;

	memset(&validation_state, 0, sizeof(validation_state));

	validation_state.shader = shader_obj->vaddr;
	validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
......
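The guards above reach struct vc4_dev from whatever handle each function already holds (exec->dev, vc4file->dev, or a GEM object's base.dev). The last of those works because vc4_dev embeds its drm_device, so the usual container_of() downcast applies; a sketch assuming that embedding and the field name base:

#include <linux/container_of.h>

/* Sketch: any drm_device pointer, e.g. shader_obj->base.dev above, can
 * be downcast to the enclosing vc4_dev because the DRM device is the
 * embedded member (assumed field name: base).
 */
static inline struct vc4_dev *to_vc4_dev_sketch(struct drm_device *dev)
{
	return container_of(dev, struct vc4_dev, base);
}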