Commit d315495d authored by Dave Airlie's avatar Dave Airlie

Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

This time, a bunch of cleanups and refactoring work so that we can get
dt bindings upstream.  In general, we keep compatibility with existing
downstream bindings as much as possible, to make backports to device
kernels easier, but now we have cleaner upstream bindings so that we
can start landing gpu/display support in upstream dts files.

Plus shrinker and madvise support, which has been on my todo list for
a long time.  And support for arbitrary # of cmd bufs in submit ioctl
(I've got libdrm+mesa userspace for this on branches) to enable some
of the mesa batch/reorder stuff I'm working on.  Userspace decides
whether this is supported based on advertised driver version.  For the
interesting userspace bits, see:

  https://github.com/freedreno/libdrm/commit/1baf03ac6e77049d9c8be1e3d5164283ce82c9db

Plus support for ASoC hdmi audio codec, and few other random other
cleanups.

* 'msm-next' of git://people.freedesktop.org/~robclark/linux: (52 commits)
  drm/msm: Delete an unnecessary check before drm_gem_object_unreference()
  drm/msm: Delete unnecessary checks before drm_gem_object_unreference_unlocked()
  drm/msm/hdmi: Delete an unnecessary check before the function call "kfree"
  drm/msm: return -EFAULT instead of bytes remaining
  drm/msm/hdmi: use PTR_ERR_OR_ZERO() to simplify the code
  drm/msm: add missing of_node_put after calling of_parse_phandle
  drm/msm: Replace drm_fb_get_bpp_depth() with drm_format_plane_cpp()
  drm/msm/dsi: Fix return value check in msm_dsi_host_set_display_mode()
  drm: msm: Add ASoC generic hdmi audio codec support.
  drm/msm/rd: add module param to dump all bo's
  drm/msm/rd: split out snapshot_buf helper
  drm/msm: bump kernel api version
  drm/msm: deal with arbitrary # of cmd buffers
  drm/msm: wire up vmap shrinker
  drm/msm: change gem->vmap() to get/put
  drm/msm: shrinker support
  drm/msm: add put_iova() helper
  drm/msm: add madvise ioctl
  drm/msm: use mutex_lock_interruptible for submit ioctl
  dt-bindings: msm/mdp: Provide details on MDP interface ports
  ...
parents 2d635fde 0a677125
...@@ -11,8 +11,7 @@ Required properties: ...@@ -11,8 +11,7 @@ Required properties:
be 0 or 1, since we have 2 DSI controllers at most for now. be 0 or 1, since we have 2 DSI controllers at most for now.
- interrupts: The interrupt signal from the DSI block. - interrupts: The interrupt signal from the DSI block.
- power-domains: Should be <&mmcc MDSS_GDSC>. - power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: device clocks - clocks: Phandles to device clocks.
See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required: - clock-names: the following clocks are required:
* "mdp_core_clk" * "mdp_core_clk"
* "iface_clk" * "iface_clk"
...@@ -23,16 +22,21 @@ Required properties: ...@@ -23,16 +22,21 @@ Required properties:
* "core_clk" * "core_clk"
For DSIv2, we need an additional clock: For DSIv2, we need an additional clock:
* "src_clk" * "src_clk"
- assigned-clocks: Parents of "byte_clk" and "pixel_clk" for the given platform.
- assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided
by a DSI PHY block. See [1] for details on clock bindings.
- vdd-supply: phandle to vdd regulator device node - vdd-supply: phandle to vdd regulator device node
- vddio-supply: phandle to vdd-io regulator device node - vddio-supply: phandle to vdd-io regulator device node
- vdda-supply: phandle to vdda regulator device node - vdda-supply: phandle to vdda regulator device node
- qcom,dsi-phy: phandle to DSI PHY device node - phys: phandle to DSI PHY device node
- phy-names: the name of the corresponding PHY device
- syscon-sfpb: A phandle to mmss_sfpb syscon node (only for DSIv2) - syscon-sfpb: A phandle to mmss_sfpb syscon node (only for DSIv2)
- ports: Contains 2 DSI controller ports as child nodes. Each port contains
an endpoint subnode as defined in [2] and [3].
Optional properties: Optional properties:
- panel@0: Node of panel connected to this DSI controller. - panel@0: Node of panel connected to this DSI controller.
See files in Documentation/devicetree/bindings/display/panel/ for each supported See files in [4] for each supported panel.
panel.
- qcom,dual-dsi-mode: Boolean value indicating if the DSI controller is - qcom,dual-dsi-mode: Boolean value indicating if the DSI controller is
driving a panel which needs 2 DSI links. driving a panel which needs 2 DSI links.
- qcom,master-dsi: Boolean value indicating if the DSI controller is driving - qcom,master-dsi: Boolean value indicating if the DSI controller is driving
...@@ -44,34 +48,38 @@ Optional properties: ...@@ -44,34 +48,38 @@ Optional properties:
- pinctrl-names: the pin control state names; should contain "default" - pinctrl-names: the pin control state names; should contain "default"
- pinctrl-0: the default pinctrl state (active) - pinctrl-0: the default pinctrl state (active)
- pinctrl-n: the "sleep" pinctrl state - pinctrl-n: the "sleep" pinctrl state
- port: DSI controller output port, containing one endpoint subnode. - ports: contains DSI controller input and output ports as children, each
containing one endpoint subnode.
DSI Endpoint properties: DSI Endpoint properties:
- remote-endpoint: set to phandle of the connected panel's endpoint. - remote-endpoint: For port@0, set to phandle of the connected panel/bridge's
See Documentation/devicetree/bindings/graph.txt for device graph info. input endpoint. For port@1, set to the MDP interface output. See [2] for
- qcom,data-lane-map: this describes how the logical DSI lanes are mapped device graph info.
to the physical lanes on the given platform. The value contained in
index n describes what logical data lane is mapped to the physical data - data-lanes: this describes how the physical DSI data lanes are mapped
lane n (DATAn, where n lies between 0 and 3). to the logical lanes on the given platform. The value contained in
index n describes what physical lane is mapped to the logical lane n
(DATAn, where n lies between 0 and 3). The clock lane position is fixed
and can't be changed. Hence, they aren't a part of the DT bindings. See
[3] for more info on the data-lanes property.
For example: For example:
qcom,data-lane-map = <3 0 1 2>; data-lanes = <3 0 1 2>;
The above mapping describes that the logical data lane DATA3 is mapped to The above mapping describes that the logical data lane DATA0 is mapped to
the physical data lane DATA0, logical DATA0 to physical DATA1, logic DATA1 the physical data lane DATA3, logical DATA1 to physical DATA0, logic DATA2
to phys DATA2 and logic DATA2 to phys DATA3. to phys DATA1 and logic DATA3 to phys DATA2.
There are only a limited number of physical to logical mappings possible: There are only a limited number of physical to logical mappings possible:
<0 1 2 3>
"0123": Logic 0->Phys 0; Logic 1->Phys 1; Logic 2->Phys 2; Logic 3->Phys 3; <1 2 3 0>
"3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3; <2 3 0 1>
"2301": Logic 2->Phys 0; Logic 3->Phys 1; Logic 0->Phys 2; Logic 1->Phys 3; <3 0 1 2>
"1230": Logic 1->Phys 0; Logic 2->Phys 1; Logic 3->Phys 2; Logic 0->Phys 3; <0 3 2 1>
"0321": Logic 0->Phys 0; Logic 3->Phys 1; Logic 2->Phys 2; Logic 1->Phys 3; <1 0 3 2>
"1032": Logic 1->Phys 0; Logic 0->Phys 1; Logic 3->Phys 2; Logic 2->Phys 3; <2 1 0 3>
"2103": Logic 2->Phys 0; Logic 1->Phys 1; Logic 0->Phys 2; Logic 3->Phys 3; <3 2 1 0>
"3210": Logic 3->Phys 0; Logic 2->Phys 1; Logic 1->Phys 2; Logic 0->Phys 3;
DSI PHY: DSI PHY:
Required properties: Required properties:
...@@ -86,11 +94,12 @@ Required properties: ...@@ -86,11 +94,12 @@ Required properties:
* "dsi_pll" * "dsi_pll"
* "dsi_phy" * "dsi_phy"
* "dsi_phy_regulator" * "dsi_phy_regulator"
- clock-cells: Must be 1. The DSI PHY block acts as a clock provider, creating
2 clocks: A byte clock (index 0), and a pixel clock (index 1).
- qcom,dsi-phy-index: The ID of DSI PHY hardware instance. This should - qcom,dsi-phy-index: The ID of DSI PHY hardware instance. This should
be 0 or 1, since we have 2 DSI PHYs at most for now. be 0 or 1, since we have 2 DSI PHYs at most for now.
- power-domains: Should be <&mmcc MDSS_GDSC>. - power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: device clocks - clocks: Phandles to device clocks. See [1] for details on clock bindings.
See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required: - clock-names: the following clocks are required:
* "iface_clk" * "iface_clk"
- vddio-supply: phandle to vdd-io regulator device node - vddio-supply: phandle to vdd-io regulator device node
...@@ -99,11 +108,16 @@ Optional properties: ...@@ -99,11 +108,16 @@ Optional properties:
- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY - qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
regulator is wanted. regulator is wanted.
[1] Documentation/devicetree/bindings/clocks/clock-bindings.txt
[2] Documentation/devicetree/bindings/graph.txt
[3] Documentation/devicetree/bindings/media/video-interfaces.txt
[4] Documentation/devicetree/bindings/display/panel/
Example: Example:
mdss_dsi0: qcom,mdss_dsi@fd922800 { dsi0: dsi@fd922800 {
compatible = "qcom,mdss-dsi-ctrl"; compatible = "qcom,mdss-dsi-ctrl";
qcom,dsi-host-index = <0>; qcom,dsi-host-index = <0>;
interrupt-parent = <&mdss_mdp>; interrupt-parent = <&mdp>;
interrupts = <4 0>; interrupts = <4 0>;
reg-names = "dsi_ctrl"; reg-names = "dsi_ctrl";
reg = <0xfd922800 0x200>; reg = <0xfd922800 0x200>;
...@@ -124,19 +138,48 @@ Example: ...@@ -124,19 +138,48 @@ Example:
<&mmcc MDSS_AHB_CLK>, <&mmcc MDSS_AHB_CLK>,
<&mmcc MDSS_MDP_CLK>, <&mmcc MDSS_MDP_CLK>,
<&mmcc MDSS_PCLK0_CLK>; <&mmcc MDSS_PCLK0_CLK>;
assigned-clocks =
<&mmcc BYTE0_CLK_SRC>,
<&mmcc PCLK0_CLK_SRC>;
assigned-clock-parents =
<&dsi_phy0 0>,
<&dsi_phy0 1>;
vdda-supply = <&pma8084_l2>; vdda-supply = <&pma8084_l2>;
vdd-supply = <&pma8084_l22>; vdd-supply = <&pma8084_l22>;
vddio-supply = <&pma8084_l12>; vddio-supply = <&pma8084_l12>;
qcom,dsi-phy = <&mdss_dsi_phy0>; phys = <&dsi_phy0>;
phy-names ="dsi-phy";
qcom,dual-dsi-mode; qcom,dual-dsi-mode;
qcom,master-dsi; qcom,master-dsi;
qcom,sync-dual-dsi; qcom,sync-dual-dsi;
pinctrl-names = "default", "sleep"; pinctrl-names = "default", "sleep";
pinctrl-0 = <&mdss_dsi_active>; pinctrl-0 = <&dsi_active>;
pinctrl-1 = <&mdss_dsi_suspend>; pinctrl-1 = <&dsi_suspend>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dsi0_in: endpoint {
remote-endpoint = <&mdp_intf1_out>;
};
};
port@1 {
reg = <1>;
dsi0_out: endpoint {
remote-endpoint = <&panel_in>;
data-lanes = <0 1 2 3>;
};
};
};
panel: panel@0 { panel: panel@0 {
compatible = "sharp,lq101r1sx01"; compatible = "sharp,lq101r1sx01";
...@@ -152,16 +195,9 @@ Example: ...@@ -152,16 +195,9 @@ Example:
}; };
}; };
}; };
port {
dsi0_out: endpoint {
remote-endpoint = <&panel_in>;
lanes = <0 1 2 3>;
};
};
}; };
mdss_dsi_phy0: qcom,mdss_dsi_phy@fd922a00 { dsi_phy0: dsi-phy@fd922a00 {
compatible = "qcom,dsi-phy-28nm-hpm"; compatible = "qcom,dsi-phy-28nm-hpm";
qcom,dsi-phy-index = <0>; qcom,dsi-phy-index = <0>;
reg-names = reg-names =
...@@ -173,6 +209,7 @@ Example: ...@@ -173,6 +209,7 @@ Example:
<0xfd922d80 0x7b>; <0xfd922d80 0x7b>;
clock-names = "iface_clk"; clock-names = "iface_clk";
clocks = <&mmcc MDSS_AHB_CLK>; clocks = <&mmcc MDSS_AHB_CLK>;
#clock-cells = <1>;
vddio-supply = <&pma8084_l12>; vddio-supply = <&pma8084_l12>;
qcom,dsi-phy-regulator-ldo-mode; qcom,dsi-phy-regulator-ldo-mode;
......
Qualcomm adreno/snapdragon display controller Qualcomm adreno/snapdragon MDP4 display controller
Description:
This is the bindings documentation for the MDP4 display controller found in
SoCs like MSM8960, APQ8064 and MSM8660.
Required properties: Required properties:
- compatible: - compatible:
* "qcom,mdp4" - mdp4 * "qcom,mdp4" - mdp4
* "qcom,mdp5" - mdp5
- reg: Physical base address and length of the controller's registers. - reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt signal from the display controller. - interrupts: The interrupt signal from the display controller.
- connectors: array of phandles for output device(s)
- clocks: device clocks - clocks: device clocks
See ../clocks/clock-bindings.txt for details. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required. - clock-names: the following clocks are required.
For MDP4: * "core_clk"
* "core_clk" * "iface_clk"
* "iface_clk" * "bus_clk"
* "lut_clk" * "lut_clk"
* "src_clk" * "hdmi_clk"
* "hdmi_clk" * "tv_clk"
* "mdp_clk" - ports: contains the list of output ports from MDP. These connect to interfaces
For MDP5: that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
* "bus_clk" special case since it is a part of the MDP block itself).
* "iface_clk"
* "core_clk_src" Each output port contains an endpoint that describes how it is connected to an
* "core_clk" external interface. These are described by the standard properties documented
* "lut_clk" (some MDP5 versions may not need this) here:
* "vsync_clk" Documentation/devicetree/bindings/graph.txt
Documentation/devicetree/bindings/media/video-interfaces.txt
The output port mappings are:
Port 0 -> LCDC/LVDS
Port 1 -> DSI1 Cmd/Video
Port 2 -> DSI2 Cmd/Video
Port 3 -> DTV
Optional properties: Optional properties:
- gpus: phandle for gpu device
- clock-names: the following clocks are optional: - clock-names: the following clocks are optional:
* "lut_clk" * "lut_clk"
...@@ -35,25 +44,69 @@ Example: ...@@ -35,25 +44,69 @@ Example:
/ { / {
... ...
mdp: qcom,mdp@5100000 { hdmi: hdmi@4a00000 {
...
ports {
...
port@0 {
reg = <0>;
hdmi_in: endpoint {
remote-endpoint = <&mdp_dtv_out>;
};
};
...
};
...
};
...
mdp: mdp@5100000 {
compatible = "qcom,mdp4"; compatible = "qcom,mdp4";
reg = <0x05100000 0xf0000>; reg = <0x05100000 0xf0000>;
interrupts = <GIC_SPI 75 0>; interrupts = <GIC_SPI 75 0>;
connectors = <&hdmi>;
gpus = <&gpu>;
clock-names = clock-names =
"core_clk", "core_clk",
"iface_clk", "iface_clk",
"lut_clk", "lut_clk",
"src_clk",
"hdmi_clk", "hdmi_clk",
"mdp_clk"; "tv_clk";
clocks = clocks =
<&mmcc MDP_SRC>, <&mmcc MDP_CLK>,
<&mmcc MDP_AHB_CLK>, <&mmcc MDP_AHB_CLK>,
<&mmcc MDP_AXI_CLK>,
<&mmcc MDP_LUT_CLK>, <&mmcc MDP_LUT_CLK>,
<&mmcc TV_SRC>,
<&mmcc HDMI_TV_CLK>, <&mmcc HDMI_TV_CLK>,
<&mmcc MDP_TV_CLK>; <&mmcc MDP_TV_CLK>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mdp_lvds_out: endpoint {
};
};
port@1 {
reg = <1>;
mdp_dsi1_out: endpoint {
};
};
port@2 {
reg = <2>;
mdp_dsi2_out: endpoint {
};
};
port@3 {
reg = <3>;
mdp_dtv_out: endpoint {
remote-endpoint = <&hdmi_in>;
};
};
};
}; };
}; };
Qualcomm adreno/snapdragon MDP5 display controller
Description:
This is the bindings documentation for the Mobile Display Subsytem(MDSS) that
encapsulates sub-blocks like MDP5, DSI, HDMI, eDP etc, and the MDP5 display
controller found in SoCs like MSM8974, APQ8084, MSM8916, MSM8994 and MSM8996.
MDSS:
Required properties:
- compatible:
* "qcom,mdss" - MDSS
- reg: Physical base address and length of the controller's registers.
- reg-names: The names of register regions. The following regions are required:
* "mdss_phys"
* "vbif_phys"
- interrupts: The interrupt signal from MDSS.
- interrupt-controller: identifies the node as an interrupt controller.
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
- power-domains: a power domain consumer specifier according to
Documentation/devicetree/bindings/power/power_domain.txt
- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required.
* "iface_clk"
* "bus_clk"
* "vsync_clk"
- #address-cells: number of address cells for the MDSS children. Should be 1.
- #size-cells: Should be 1.
- ranges: parent bus address space is the same as the child bus address space.
Optional properties:
- clock-names: the following clocks are optional:
* "lut_clk"
MDP5:
Required properties:
- compatible:
* "qcom,mdp5" - MDP5
- reg: Physical base address and length of the controller's registers.
- reg-names: The names of register regions. The following regions are required:
* "mdp_phys"
- interrupts: Interrupt line from MDP5 to MDSS interrupt controller.
- interrupt-parent: phandle to the MDSS block
through MDP block
- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required.
- * "bus_clk"
- * "iface_clk"
- * "core_clk"
- * "vsync_clk"
- ports: contains the list of output ports from MDP. These connect to interfaces
that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
special case since it is a part of the MDP block itself).
Each output port contains an endpoint that describes how it is connected to an
external interface. These are described by the standard properties documented
here:
Documentation/devicetree/bindings/graph.txt
Documentation/devicetree/bindings/media/video-interfaces.txt
The availability of output ports can vary across SoC revisions:
For MSM8974 and APQ8084:
Port 0 -> MDP_INTF0 (eDP)
Port 1 -> MDP_INTF1 (DSI1)
Port 2 -> MDP_INTF2 (DSI2)
Port 3 -> MDP_INTF3 (HDMI)
For MSM8916:
Port 0 -> MDP_INTF1 (DSI1)
For MSM8994 and MSM8996:
Port 0 -> MDP_INTF1 (DSI1)
Port 1 -> MDP_INTF2 (DSI2)
Port 2 -> MDP_INTF3 (HDMI)
Optional properties:
- clock-names: the following clocks are optional:
* "lut_clk"
Example:
/ {
...
mdss: mdss@1a00000 {
compatible = "qcom,mdss";
reg = <0x1a00000 0x1000>,
<0x1ac8000 0x3000>;
reg-names = "mdss_phys", "vbif_phys";
power-domains = <&gcc MDSS_GDSC>;
clocks = <&gcc GCC_MDSS_AHB_CLK>,
<&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>;
clock-names = "iface_clk",
"bus_clk",
"vsync_clk"
interrupts = <0 72 0>;
interrupt-controller;
#interrupt-cells = <1>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
mdp: mdp@1a01000 {
compatible = "qcom,mdp5";
reg = <0x1a01000 0x90000>;
reg-names = "mdp_phys";
interrupt-parent = <&mdss>;
interrupts = <0 0>;
clocks = <&gcc GCC_MDSS_AHB_CLK>,
<&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_MDP_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>;
clock-names = "iface_clk",
"bus_clk",
"core_clk",
"vsync_clk";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mdp5_intf1_out: endpoint {
remote-endpoint = <&dsi0_in>;
};
};
};
};
dsi0: dsi@1a98000 {
...
ports {
...
port@0 {
reg = <0>;
dsi0_in: endpoint {
remote-endpoint = <&mdp5_intf1_out>;
};
};
...
};
...
};
dsi_phy0: dsi-phy@1a98300 {
...
};
};
};
...@@ -10,6 +10,7 @@ config DRM_MSM ...@@ -10,6 +10,7 @@ config DRM_MSM
select SHMEM select SHMEM
select TMPFS select TMPFS
select QCOM_SCM select QCOM_SCM
select SND_SOC_HDMI_CODEC if SND_SOC
default y default y
help help
DRM/KMS driver for MSM/snapdragon. DRM/KMS driver for MSM/snapdragon.
......
...@@ -35,6 +35,7 @@ msm-y := \ ...@@ -35,6 +35,7 @@ msm-y := \
mdp/mdp5/mdp5_crtc.o \ mdp/mdp5/mdp5_crtc.o \
mdp/mdp5/mdp5_encoder.o \ mdp/mdp5/mdp5_encoder.o \
mdp/mdp5/mdp5_irq.o \ mdp/mdp5/mdp5_irq.o \
mdp/mdp5/mdp5_mdss.o \
mdp/mdp5/mdp5_kms.o \ mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \ mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \ mdp/mdp5/mdp5_smp.o \
...@@ -45,6 +46,7 @@ msm-y := \ ...@@ -45,6 +46,7 @@ msm-y := \
msm_fence.o \ msm_fence.o \
msm_gem.o \ msm_gem.o \
msm_gem_prime.o \ msm_gem_prime.o \
msm_gem_shrinker.o \
msm_gem_submit.o \ msm_gem_submit.o \
msm_gpu.o \ msm_gpu.o \
msm_iommu.o \ msm_iommu.o \
......
...@@ -139,7 +139,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, ...@@ -139,7 +139,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = gpu->rb; struct msm_ringbuffer *ring = gpu->rb;
unsigned i, ibs = 0; unsigned i;
for (i = 0; i < submit->nr_cmds; i++) { for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) { switch (submit->cmd[i].type) {
...@@ -155,18 +155,11 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, ...@@ -155,18 +155,11 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, submit->cmd[i].iova); OUT_RING(ring, submit->cmd[i].iova);
OUT_RING(ring, submit->cmd[i].size); OUT_RING(ring, submit->cmd[i].size);
ibs++; OUT_PKT2(ring);
break; break;
} }
} }
/* on a320, at least, we seem to need to pad things out to an
* even number of qwords to avoid issue w/ CP hanging on wrap-
* around:
*/
if (ibs % 2)
OUT_PKT2(ring);
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence->seqno); OUT_RING(ring, submit->fence->seqno);
...@@ -407,7 +400,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, ...@@ -407,7 +400,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret; return ret;
} }
adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
if (IS_ERR(adreno_gpu->memptrs)) { if (IS_ERR(adreno_gpu->memptrs)) {
dev_err(drm->dev, "could not vmap memptrs\n"); dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM; return -ENOMEM;
...@@ -426,8 +419,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, ...@@ -426,8 +419,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *gpu) void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{ {
if (gpu->memptrs_bo) { if (gpu->memptrs_bo) {
if (gpu->memptrs)
msm_gem_put_vaddr(gpu->memptrs_bo);
if (gpu->memptrs_iova) if (gpu->memptrs_iova)
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo); drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
} }
release_firmware(gpu->pm4); release_firmware(gpu->pm4);
......
...@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) ...@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
struct platform_device *phy_pdev; struct platform_device *phy_pdev;
struct device_node *phy_node; struct device_node *phy_node;
phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0); phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) { if (!phy_node) {
dev_err(&pdev->dev, "cannot find phy device\n"); dev_err(&pdev->dev, "cannot find phy device\n");
return -ENXIO; return -ENXIO;
......
...@@ -29,6 +29,8 @@ static const struct msm_dsi_config apq8064_dsi_cfg = { ...@@ -29,6 +29,8 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
}, },
.bus_clk_names = dsi_v2_bus_clk_names, .bus_clk_names = dsi_v2_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names), .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
.io_start = { 0x4700000, 0x5800000 },
.num_dsi = 2,
}; };
static const char * const dsi_6g_bus_clk_names[] = { static const char * const dsi_6g_bus_clk_names[] = {
...@@ -48,6 +50,8 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { ...@@ -48,6 +50,8 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
}, },
.bus_clk_names = dsi_6g_bus_clk_names, .bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = { 0xfd922800, 0xfd922b00 },
.num_dsi = 2,
}; };
static const char * const dsi_8916_bus_clk_names[] = { static const char * const dsi_8916_bus_clk_names[] = {
...@@ -66,6 +70,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = { ...@@ -66,6 +70,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
}, },
.bus_clk_names = dsi_8916_bus_clk_names, .bus_clk_names = dsi_8916_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names), .num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
.io_start = { 0x1a98000 },
.num_dsi = 1,
}; };
static const struct msm_dsi_config msm8994_dsi_cfg = { static const struct msm_dsi_config msm8994_dsi_cfg = {
...@@ -84,6 +90,8 @@ static const struct msm_dsi_config msm8994_dsi_cfg = { ...@@ -84,6 +90,8 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
}, },
.bus_clk_names = dsi_6g_bus_clk_names, .bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = { 0xfd998000, 0xfd9a0000 },
.num_dsi = 2,
}; };
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
......
...@@ -34,6 +34,8 @@ struct msm_dsi_config { ...@@ -34,6 +34,8 @@ struct msm_dsi_config {
struct dsi_reg_config reg_cfg; struct dsi_reg_config reg_cfg;
const char * const *bus_clk_names; const char * const *bus_clk_names;
const int num_bus_clks; const int num_bus_clks;
const resource_size_t io_start[DSI_MAX];
const int num_dsi;
}; };
struct msm_dsi_cfg_handler { struct msm_dsi_cfg_handler {
......
...@@ -1066,7 +1066,7 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host, ...@@ -1066,7 +1066,7 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
} }
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
data = msm_gem_vaddr(msm_host->tx_gem_obj); data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
if (IS_ERR(data)) { if (IS_ERR(data)) {
ret = PTR_ERR(data); ret = PTR_ERR(data);
pr_err("%s: get vaddr failed, %d\n", __func__, ret); pr_err("%s: get vaddr failed, %d\n", __func__, ret);
...@@ -1094,6 +1094,9 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host, ...@@ -1094,6 +1094,9 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
if (packet.size < len) if (packet.size < len)
memset(data + packet.size, 0xff, len - packet.size); memset(data + packet.size, 0xff, len - packet.size);
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
msm_gem_put_vaddr(msm_host->tx_gem_obj);
return len; return len;
} }
...@@ -1543,7 +1546,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, ...@@ -1543,7 +1546,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
u32 lane_map[4]; u32 lane_map[4];
int ret, i, len, num_lanes; int ret, i, len, num_lanes;
prop = of_find_property(ep, "qcom,data-lane-map", &len); prop = of_find_property(ep, "data-lanes", &len);
if (!prop) { if (!prop) {
dev_dbg(dev, "failed to find data lane mapping\n"); dev_dbg(dev, "failed to find data lane mapping\n");
return -EINVAL; return -EINVAL;
...@@ -1558,7 +1561,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, ...@@ -1558,7 +1561,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
msm_host->num_data_lanes = num_lanes; msm_host->num_data_lanes = num_lanes;
ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map, ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes); num_lanes);
if (ret) { if (ret) {
dev_err(dev, "failed to read lane data\n"); dev_err(dev, "failed to read lane data\n");
...@@ -1573,8 +1576,19 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, ...@@ -1573,8 +1576,19 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
const int *swap = supported_data_lane_swaps[i]; const int *swap = supported_data_lane_swaps[i];
int j; int j;
/*
* the data-lanes array we get from DT has a logical->physical
* mapping. The "data lane swap" register field represents
* supported configurations in a physical->logical mapping.
* Translate the DT mapping to what we understand and find a
* configuration that works.
*/
for (j = 0; j < num_lanes; j++) { for (j = 0; j < num_lanes; j++) {
if (swap[j] != lane_map[j]) if (lane_map[j] < 0 || lane_map[j] > 3)
dev_err(dev, "bad physical lane entry %u\n",
lane_map[j]);
if (swap[lane_map[j]] != j)
break; break;
} }
...@@ -1594,20 +1608,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) ...@@ -1594,20 +1608,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
struct device_node *endpoint, *device_node; struct device_node *endpoint, *device_node;
int ret; int ret;
ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
if (ret) {
dev_err(dev, "%s: host index not specified, ret=%d\n",
__func__, ret);
return ret;
}
/* /*
* Get the first endpoint node. In our case, dsi has one output port * Get the endpoint of the output port of the DSI host. In our case,
* to which the panel is connected. Don't return an error if a port * this is mapped to port number with reg = 1. Don't return an error if
* isn't defined. It's possible that there is nothing connected to * the remote endpoint isn't defined. It's possible that there is
* the dsi output. * nothing connected to the dsi output.
*/ */
endpoint = of_graph_get_next_endpoint(np, NULL); endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) { if (!endpoint) {
dev_dbg(dev, "%s: no endpoint\n", __func__); dev_dbg(dev, "%s: no endpoint\n", __func__);
return 0; return 0;
...@@ -1648,6 +1655,25 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) ...@@ -1648,6 +1655,25 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
return ret; return ret;
} }
static int dsi_host_get_id(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
struct resource *res;
int i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
if (!res)
return -EINVAL;
for (i = 0; i < cfg->num_dsi; i++) {
if (cfg->io_start[i] == res->start)
return i;
}
return -EINVAL;
}
int msm_dsi_host_init(struct msm_dsi *msm_dsi) int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{ {
struct msm_dsi_host *msm_host = NULL; struct msm_dsi_host *msm_host = NULL;
...@@ -1684,6 +1710,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) ...@@ -1684,6 +1710,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail; goto fail;
} }
msm_host->id = dsi_host_get_id(msm_host);
if (msm_host->id < 0) {
ret = msm_host->id;
pr_err("%s: unable to identify DSI host index\n", __func__);
goto fail;
}
/* fixup base address by io offset */ /* fixup base address by io offset */
msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset; msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
...@@ -2245,9 +2278,9 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, ...@@ -2245,9 +2278,9 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
} }
msm_host->mode = drm_mode_duplicate(msm_host->dev, mode); msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
if (IS_ERR(msm_host->mode)) { if (!msm_host->mode) {
pr_err("%s: cannot duplicate mode\n", __func__); pr_err("%s: cannot duplicate mode\n", __func__);
return PTR_ERR(msm_host->mode); return -ENOMEM;
} }
return 0; return 0;
......
...@@ -271,6 +271,30 @@ static const struct of_device_id dsi_phy_dt_match[] = { ...@@ -271,6 +271,30 @@ static const struct of_device_id dsi_phy_dt_match[] = {
{} {}
}; };
/*
* Currently, we only support one SoC for each PHY type. When we have multiple
* SoCs for the same PHY, we can try to make the index searching a bit more
* clever.
*/
static int dsi_phy_get_id(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
const struct msm_dsi_phy_cfg *cfg = phy->cfg;
struct resource *res;
int i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
if (!res)
return -EINVAL;
for (i = 0; i < cfg->num_dsi_phy; i++) {
if (cfg->io_start[i] == res->start)
return i;
}
return -EINVAL;
}
static int dsi_phy_driver_probe(struct platform_device *pdev) static int dsi_phy_driver_probe(struct platform_device *pdev)
{ {
struct msm_dsi_phy *phy; struct msm_dsi_phy *phy;
...@@ -289,10 +313,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) ...@@ -289,10 +313,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->cfg = match->data; phy->cfg = match->data;
phy->pdev = pdev; phy->pdev = pdev;
ret = of_property_read_u32(dev->of_node, phy->id = dsi_phy_get_id(phy);
"qcom,dsi-phy-index", &phy->id); if (phy->id < 0) {
if (ret) { ret = phy->id;
dev_err(dev, "%s: PHY index not specified, %d\n", dev_err(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret); __func__, ret);
goto fail; goto fail;
} }
......
...@@ -38,6 +38,8 @@ struct msm_dsi_phy_cfg { ...@@ -38,6 +38,8 @@ struct msm_dsi_phy_cfg {
* Fill default H/W values in illegal cells, eg. cell {0, 1}. * Fill default H/W values in illegal cells, eg. cell {0, 1}.
*/ */
bool src_pll_truthtable[DSI_MAX][DSI_MAX]; bool src_pll_truthtable[DSI_MAX][DSI_MAX];
const resource_size_t io_start[DSI_MAX];
const int num_dsi_phy;
}; };
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
......
...@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { ...@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.ops = { .ops = {
.enable = dsi_20nm_phy_enable, .enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable, .disable = dsi_20nm_phy_disable,
} },
.io_start = { 0xfd998300, 0xfd9a0300 },
.num_dsi_phy = 2,
}; };
...@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { ...@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.enable = dsi_28nm_phy_enable, .enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable, .disable = dsi_28nm_phy_disable,
}, },
.io_start = { 0xfd922b00, 0xfd923100 },
.num_dsi_phy = 2,
}; };
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
...@@ -160,5 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { ...@@ -160,5 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.enable = dsi_28nm_phy_enable, .enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable, .disable = dsi_28nm_phy_disable,
}, },
.io_start = { 0x1a98500 },
.num_dsi_phy = 1,
}; };
...@@ -192,4 +192,6 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = { ...@@ -192,4 +192,6 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.enable = dsi_28nm_phy_enable, .enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable, .disable = dsi_28nm_phy_disable,
}, },
.io_start = { 0x4700300, 0x5800300 },
.num_dsi_phy = 2,
}; };
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/of_irq.h> #include <linux/of_irq.h>
#include <linux/of_gpio.h> #include <linux/of_gpio.h>
#include <sound/hdmi-codec.h>
#include "hdmi.h" #include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on) void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
...@@ -434,6 +435,111 @@ static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name) ...@@ -434,6 +435,111 @@ static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
return gpio; return gpio;
} }
/*
* HDMI audio codec callbacks
*/
static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
struct hdmi *hdmi = dev_get_drvdata(dev);
unsigned int chan;
unsigned int channel_allocation = 0;
unsigned int rate;
unsigned int level_shift = 0; /* 0dB */
bool down_mix = false;
dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
params->sample_width, params->cea.channels);
switch (params->cea.channels) {
case 2:
/* FR and FL speakers */
channel_allocation = 0;
chan = MSM_HDMI_AUDIO_CHANNEL_2;
break;
case 4:
/* FC, LFE, FR and FL speakers */
channel_allocation = 0x3;
chan = MSM_HDMI_AUDIO_CHANNEL_4;
break;
case 6:
/* RR, RL, FC, LFE, FR and FL speakers */
channel_allocation = 0x0B;
chan = MSM_HDMI_AUDIO_CHANNEL_6;
break;
case 8:
/* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
channel_allocation = 0x1F;
chan = MSM_HDMI_AUDIO_CHANNEL_8;
break;
default:
return -EINVAL;
}
switch (params->sample_rate) {
case 32000:
rate = HDMI_SAMPLE_RATE_32KHZ;
break;
case 44100:
rate = HDMI_SAMPLE_RATE_44_1KHZ;
break;
case 48000:
rate = HDMI_SAMPLE_RATE_48KHZ;
break;
case 88200:
rate = HDMI_SAMPLE_RATE_88_2KHZ;
break;
case 96000:
rate = HDMI_SAMPLE_RATE_96KHZ;
break;
case 176400:
rate = HDMI_SAMPLE_RATE_176_4KHZ;
break;
case 192000:
rate = HDMI_SAMPLE_RATE_192KHZ;
break;
default:
dev_err(dev, "rate[%d] not supported!\n",
params->sample_rate);
return -EINVAL;
}
msm_hdmi_audio_set_sample_rate(hdmi, rate);
msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
level_shift, down_mix);
return 0;
}
static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct hdmi *hdmi = dev_get_drvdata(dev);
msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
}
static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
.hw_params = msm_hdmi_audio_hw_params,
.audio_shutdown = msm_hdmi_audio_shutdown,
};
static struct hdmi_codec_pdata codec_data = {
.ops = &msm_hdmi_audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
};
static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
{
hdmi->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
&codec_data,
sizeof(codec_data));
return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
}
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{ {
struct drm_device *drm = dev_get_drvdata(master); struct drm_device *drm = dev_get_drvdata(master);
...@@ -441,7 +547,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) ...@@ -441,7 +547,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
static struct hdmi_platform_config *hdmi_cfg; static struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi; struct hdmi *hdmi;
struct device_node *of_node = dev->of_node; struct device_node *of_node = dev->of_node;
int i; int i, err;
hdmi_cfg = (struct hdmi_platform_config *) hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev); of_device_get_match_data(dev);
...@@ -468,6 +574,12 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) ...@@ -468,6 +574,12 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(hdmi); return PTR_ERR(hdmi);
priv->hdmi = hdmi; priv->hdmi = hdmi;
err = msm_hdmi_register_audio_driver(hdmi, dev);
if (err) {
DRM_ERROR("Failed to attach an audio codec %d\n", err);
hdmi->audio_pdev = NULL;
}
return 0; return 0;
} }
...@@ -477,6 +589,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master, ...@@ -477,6 +589,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master); struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private; struct msm_drm_private *priv = drm->dev_private;
if (priv->hdmi) { if (priv->hdmi) {
if (priv->hdmi->audio_pdev)
platform_device_unregister(priv->hdmi->audio_pdev);
msm_hdmi_destroy(priv->hdmi); msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL; priv->hdmi = NULL;
} }
......
...@@ -50,6 +50,7 @@ struct hdmi_hdcp_ctrl; ...@@ -50,6 +50,7 @@ struct hdmi_hdcp_ctrl;
struct hdmi { struct hdmi {
struct drm_device *dev; struct drm_device *dev;
struct platform_device *pdev; struct platform_device *pdev;
struct platform_device *audio_pdev;
const struct hdmi_platform_config *config; const struct hdmi_platform_config *config;
...@@ -210,6 +211,19 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev) ...@@ -210,6 +211,19 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
/* /*
* audio: * audio:
*/ */
/* Supported HDMI Audio channels and rates */
#define MSM_HDMI_AUDIO_CHANNEL_2 0
#define MSM_HDMI_AUDIO_CHANNEL_4 1
#define MSM_HDMI_AUDIO_CHANNEL_6 2
#define MSM_HDMI_AUDIO_CHANNEL_8 3
#define HDMI_SAMPLE_RATE_32KHZ 0
#define HDMI_SAMPLE_RATE_44_1KHZ 1
#define HDMI_SAMPLE_RATE_48KHZ 2
#define HDMI_SAMPLE_RATE_88_2KHZ 3
#define HDMI_SAMPLE_RATE_96KHZ 4
#define HDMI_SAMPLE_RATE_176_4KHZ 5
#define HDMI_SAMPLE_RATE_192KHZ 6
int msm_hdmi_audio_update(struct hdmi *hdmi); int msm_hdmi_audio_update(struct hdmi *hdmi);
int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled, int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
......
...@@ -1430,7 +1430,7 @@ struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi) ...@@ -1430,7 +1430,7 @@ struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
void msm_hdmi_hdcp_destroy(struct hdmi *hdmi) void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
{ {
if (hdmi && hdmi->hdcp_ctrl) { if (hdmi) {
kfree(hdmi->hdcp_ctrl); kfree(hdmi->hdcp_ctrl);
hdmi->hdcp_ctrl = NULL; hdmi->hdcp_ctrl = NULL;
} }
......
...@@ -23,7 +23,6 @@ ...@@ -23,7 +23,6 @@
struct mdp4_dtv_encoder { struct mdp4_dtv_encoder {
struct drm_encoder base; struct drm_encoder base;
struct clk *src_clk;
struct clk *hdmi_clk; struct clk *hdmi_clk;
struct clk *mdp_clk; struct clk *mdp_clk;
unsigned long int pixclock; unsigned long int pixclock;
...@@ -179,7 +178,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder) ...@@ -179,7 +178,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
*/ */
mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
...@@ -208,19 +206,21 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder) ...@@ -208,19 +206,21 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
bs_set(mdp4_dtv_encoder, 1); bs_set(mdp4_dtv_encoder, 1);
DBG("setting src_clk=%lu", pc); DBG("setting mdp_clk=%lu", pc);
ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc); ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
if (ret) if (ret)
dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret); dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
clk_prepare_enable(mdp4_dtv_encoder->src_clk); pc, ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
if (ret)
dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
if (ret) if (ret)
dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret); dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
if (ret)
dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
mdp4_dtv_encoder->enabled = true; mdp4_dtv_encoder->enabled = true;
...@@ -235,7 +235,7 @@ static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = { ...@@ -235,7 +235,7 @@ static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate) long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
{ {
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
return clk_round_rate(mdp4_dtv_encoder->src_clk, rate); return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate);
} }
/* initialize encoder */ /* initialize encoder */
...@@ -257,13 +257,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) ...@@ -257,13 +257,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
DRM_MODE_ENCODER_TMDS, NULL); DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
dev_err(dev->dev, "failed to get src_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
goto fail;
}
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk"); mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) { if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
dev_err(dev->dev, "failed to get hdmi_clk\n"); dev_err(dev->dev, "failed to get hdmi_clk\n");
...@@ -271,9 +264,9 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) ...@@ -271,9 +264,9 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
goto fail; goto fail;
} }
mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk"); mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) { if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
dev_err(dev->dev, "failed to get mdp_clk\n"); dev_err(dev->dev, "failed to get tv_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk); ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
goto fail; goto fail;
} }
......
...@@ -158,6 +158,7 @@ static const char * const iommu_ports[] = { ...@@ -158,6 +158,7 @@ static const char * const iommu_ports[] = {
static void mdp4_destroy(struct msm_kms *kms) static void mdp4_destroy(struct msm_kms *kms)
{ {
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
struct msm_mmu *mmu = mdp4_kms->mmu; struct msm_mmu *mmu = mdp4_kms->mmu;
if (mmu) { if (mmu) {
...@@ -167,8 +168,11 @@ static void mdp4_destroy(struct msm_kms *kms) ...@@ -167,8 +168,11 @@ static void mdp4_destroy(struct msm_kms *kms)
if (mdp4_kms->blank_cursor_iova) if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
if (mdp4_kms->blank_cursor_bo) drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
if (mdp4_kms->rpm_enabled)
pm_runtime_disable(dev);
kfree(mdp4_kms); kfree(mdp4_kms);
} }
...@@ -436,7 +440,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -436,7 +440,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_kms *mdp4_kms; struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL; struct msm_kms *kms = NULL;
struct msm_mmu *mmu; struct msm_mmu *mmu;
int ret; int irq, ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) { if (!mdp4_kms) {
...@@ -457,6 +461,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -457,6 +461,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail; goto fail;
} }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
dev_err(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
kms->irq = irq;
/* NOTE: driver for this regulator still missing upstream.. use /* NOTE: driver for this regulator still missing upstream.. use
* _get_exclusive() and ignore the error if it does not exist * _get_exclusive() and ignore the error if it does not exist
* (and hope that the bootloader left it on for us) * (and hope that the bootloader left it on for us)
...@@ -492,7 +505,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -492,7 +505,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail; goto fail;
} }
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk"); mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) { if (IS_ERR(mdp4_kms->axi_clk)) {
dev_err(dev->dev, "failed to get axi_clk\n"); dev_err(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk); ret = PTR_ERR(mdp4_kms->axi_clk);
...@@ -502,6 +515,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -502,6 +515,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
clk_set_rate(mdp4_kms->clk, config->max_clk); clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk); clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
pm_runtime_enable(dev->dev);
mdp4_kms->rpm_enabled = true;
/* make sure things are off before attaching iommu (bootloader could /* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if * have left things on, in which case we'll start getting faults if
* we don't disable): * we don't disable):
......
...@@ -47,6 +47,8 @@ struct mdp4_kms { ...@@ -47,6 +47,8 @@ struct mdp4_kms {
struct mdp_irq error_handler; struct mdp_irq error_handler;
bool rpm_enabled;
/* empty/blank cursor bo to use when cursor is "disabled" */ /* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo; struct drm_gem_object *blank_cursor_bo;
uint32_t blank_cursor_iova; uint32_t blank_cursor_iova;
......
This diff is collapsed.
This diff is collapsed.
...@@ -272,22 +272,22 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, ...@@ -272,22 +272,22 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
* start signal for the slave encoder * start signal for the slave encoder
*/ */
if (intf_num == 1) if (intf_num == 1)
data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
else if (intf_num == 2) else if (intf_num == 2)
data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
else else
return -EINVAL; return -EINVAL;
/* Smart Panel, Sync mode */ /* Smart Panel, Sync mode */
data |= MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL; data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
/* Make sure clocks are on when connectors calling this function. */ /* Make sure clocks are on when connectors calling this function. */
mdp5_enable(mdp5_kms); mdp5_enable(mdp5_kms);
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), data); mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL); MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
mdp5_disable(mdp5_kms); mdp5_disable(mdp5_kms);
return 0; return 0;
......
...@@ -490,8 +490,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -490,8 +490,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct mdp5_kms *mdp5_kms = get_kms(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL; struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, cursor_addr, stride; uint32_t blendcfg, cursor_addr, stride;
int ret, bpp, lm; int ret, lm;
unsigned int depth;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w, roi_h; uint32_t roi_w, roi_h;
...@@ -521,8 +520,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -521,8 +520,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL; return -EINVAL;
lm = mdp5_crtc->lm; lm = mdp5_crtc->lm;
drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp); stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
stride = width * (bpp >> 3);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
old_bo = mdp5_crtc->cursor.scanout_bo; old_bo = mdp5_crtc->cursor.scanout_bo;
......
...@@ -118,31 +118,31 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms, ...@@ -118,31 +118,31 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms,
u32 intf_sel; u32 intf_sel;
spin_lock_irqsave(&mdp5_kms->resource_lock, flags); spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0)); intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
switch (intf->num) { switch (intf->num) {
case 0: case 0:
intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK; intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type); intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
break; break;
case 1: case 1:
intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK; intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type); intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
break; break;
case 2: case 2:
intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK; intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type); intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
break; break;
case 3: case 3:
intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK; intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type); intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
break; break;
default: default:
BUG(); BUG();
break; break;
} }
mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel); mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
} }
...@@ -557,7 +557,7 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) ...@@ -557,7 +557,7 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
if (!enable) { if (!enable) {
ctlx->pair = NULL; ctlx->pair = NULL;
ctly->pair = NULL; ctly->pair = NULL;
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0); mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
return 0; return 0;
} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
dev_err(ctl_mgr->dev->dev, "CTLs already paired\n"); dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
...@@ -570,8 +570,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) ...@@ -570,8 +570,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
ctlx->pair = ctly; ctlx->pair = ctly;
ctly->pair = ctlx; ctly->pair = ctlx;
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
return 0; return 0;
} }
......
...@@ -322,18 +322,18 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, ...@@ -322,18 +322,18 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
* to use the master's enable signal for the slave encoder. * to use the master's enable signal for the slave encoder.
*/ */
if (intf_num == 1) if (intf_num == 1)
data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC; data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
else if (intf_num == 2) else if (intf_num == 2)
data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC; data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
else else
return -EINVAL; return -EINVAL;
/* Make sure clocks are on when connectors calling this function. */ /* Make sure clocks are on when connectors calling this function. */
mdp5_enable(mdp5_kms); mdp5_enable(mdp5_kms);
/* Dumb Panel, Sync mode */ /* Dumb Panel, Sync mode */
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0); mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data); mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true); mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
......
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>. * this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <linux/irqdomain.h>
#include <linux/irq.h> #include <linux/irq.h>
#include "msm_drv.h" #include "msm_drv.h"
...@@ -24,9 +23,9 @@ ...@@ -24,9 +23,9 @@
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
uint32_t old_irqmask) uint32_t old_irqmask)
{ {
mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0), mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR,
irqmask ^ (irqmask & old_irqmask)); irqmask ^ (irqmask & old_irqmask));
mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask); mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
} }
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
...@@ -38,8 +37,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms) ...@@ -38,8 +37,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
{ {
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms); mdp5_enable(mdp5_kms);
mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff); mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000); mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
mdp5_disable(mdp5_kms); mdp5_disable(mdp5_kms);
} }
...@@ -55,7 +54,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms) ...@@ -55,7 +54,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
MDP5_IRQ_INTF2_UNDER_RUN | MDP5_IRQ_INTF2_UNDER_RUN |
MDP5_IRQ_INTF3_UNDER_RUN; MDP5_IRQ_INTF3_UNDER_RUN;
mdp5_enable(mdp5_kms);
mdp_irq_register(mdp_kms, error_handler); mdp_irq_register(mdp_kms, error_handler);
mdp5_disable(mdp5_kms);
return 0; return 0;
} }
...@@ -64,21 +65,22 @@ void mdp5_irq_uninstall(struct msm_kms *kms) ...@@ -64,21 +65,22 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
{ {
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms); mdp5_enable(mdp5_kms);
mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000); mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
mdp5_disable(mdp5_kms); mdp5_disable(mdp5_kms);
} }
static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) irqreturn_t mdp5_irq(struct msm_kms *kms)
{ {
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct drm_device *dev = mdp5_kms->dev; struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private; struct msm_drm_private *priv = dev->dev_private;
unsigned int id; unsigned int id;
uint32_t status, enable; uint32_t status, enable;
enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0)); enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable; status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;
mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status); mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
VERB("status=%08x", status); VERB("status=%08x", status);
...@@ -87,29 +89,6 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) ...@@ -87,29 +89,6 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
for (id = 0; id < priv->num_crtcs; id++) for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp5_crtc_vblank(priv->crtcs[id])) if (status & mdp5_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id); drm_handle_vblank(dev, id);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
uint32_t intr;
intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
VERB("intr=%08x", intr);
if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
mdp5_irq_mdp(mdp_kms);
intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
}
while (intr) {
irq_hw_number_t hwirq = fls(intr) - 1;
generic_handle_irq(irq_find_mapping(
mdp5_kms->irqcontroller.domain, hwirq));
intr &= ~(1 << hwirq);
}
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -135,81 +114,3 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) ...@@ -135,81 +114,3 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
mdp5_crtc_vblank(crtc), false); mdp5_crtc_vblank(crtc), false);
mdp5_disable(mdp5_kms); mdp5_disable(mdp5_kms);
} }
/*
* interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
* can register to get their irq's delivered
*/
#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
MDSS_HW_INTR_STATUS_INTR_DSI1 | \
MDSS_HW_INTR_STATUS_INTR_HDMI | \
MDSS_HW_INTR_STATUS_INTR_EDP)
static void mdp5_hw_mask_irq(struct irq_data *irqd)
{
struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static void mdp5_hw_unmask_irq(struct irq_data *irqd)
{
struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static struct irq_chip mdp5_hw_irq_chip = {
.name = "mdp5",
.irq_mask = mdp5_hw_mask_irq,
.irq_unmask = mdp5_hw_unmask_irq,
};
static int mdp5_hw_irqdomain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
struct mdp5_kms *mdp5_kms = d->host_data;
if (!(VALID_IRQS & (1 << hwirq)))
return -EPERM;
irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
irq_set_chip_data(irq, mdp5_kms);
return 0;
}
static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
.map = mdp5_hw_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
{
struct device *dev = mdp5_kms->dev->dev;
struct irq_domain *d;
d = irq_domain_add_linear(dev->of_node, 32,
&mdp5_hw_irqdomain_ops, mdp5_kms);
if (!d) {
dev_err(dev, "mdp5 irq domain add failed\n");
return -ENXIO;
}
mdp5_kms->irqcontroller.enabled_mask = 0;
mdp5_kms->irqcontroller.domain = d;
return 0;
}
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
{
if (mdp5_kms->irqcontroller.domain) {
irq_domain_remove(mdp5_kms->irqcontroller.domain);
mdp5_kms->irqcontroller.domain = NULL;
}
}
This diff is collapsed.
...@@ -31,6 +31,8 @@ struct mdp5_kms { ...@@ -31,6 +31,8 @@ struct mdp5_kms {
struct drm_device *dev; struct drm_device *dev;
struct platform_device *pdev;
struct mdp5_cfg_handler *cfg; struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
...@@ -43,29 +45,23 @@ struct mdp5_kms { ...@@ -43,29 +45,23 @@ struct mdp5_kms {
struct mdp5_ctl_manager *ctlm; struct mdp5_ctl_manager *ctlm;
/* io/register spaces: */ /* io/register spaces: */
void __iomem *mmio, *vbif; void __iomem *mmio;
struct regulator *vdd;
struct clk *axi_clk; struct clk *axi_clk;
struct clk *ahb_clk; struct clk *ahb_clk;
struct clk *src_clk;
struct clk *core_clk; struct clk *core_clk;
struct clk *lut_clk; struct clk *lut_clk;
struct clk *vsync_clk; struct clk *vsync_clk;
/* /*
* lock to protect access to global resources: ie., following register: * lock to protect access to global resources: ie., following register:
* - REG_MDP5_MDP_DISP_INTF_SEL * - REG_MDP5_DISP_INTF_SEL
*/ */
spinlock_t resource_lock; spinlock_t resource_lock;
struct mdp_irq error_handler; bool rpm_enabled;
struct { struct mdp_irq error_handler;
volatile unsigned long enabled_mask;
struct irq_domain *domain;
} irqcontroller;
}; };
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
......
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include "msm_drv.h"
#include "mdp5_kms.h"
/*
 * If needed, this can become more specific: something like struct mdp5_mdss,
 * which contains a 'struct msm_mdss base' member.
 */
struct msm_mdss {
	struct drm_device *dev;

	/* MDSS wrapper register space and VBIF register space */
	void __iomem *mmio, *vbif;

	/* GDSC-backing regulator (see comment in msm_mdss_init()) */
	struct regulator *vdd;

	/* interrupt controller state for sub-block (MDP/DSI/HDMI/eDP) irqs */
	struct {
		volatile unsigned long enabled_mask;
		struct irq_domain *domain;
	} irqcontroller;
};
/* Write a 32-bit value to an MDSS wrapper register (byte offset 'reg'). */
static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
{
	msm_writel(data, mdss->mmio + reg);
}
/* Read a 32-bit MDSS wrapper register (byte offset 'reg'). */
static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
{
	return msm_readl(mdss->mmio + reg);
}
/*
 * Top-level MDSS interrupt handler: read the summary status register and
 * dispatch each pending bit to the sub-block (MDP/DSI/HDMI/eDP) that
 * mapped an irq for it via the irqdomain.
 */
static irqreturn_t mdss_irq(int irq, void *arg)
{
	struct msm_mdss *mdss = arg;
	u32 status = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);

	VERB("intr=%08x", status);

	/* Service highest set bit first, clearing as we go: */
	while (status) {
		irq_hw_number_t hwirq = fls(status) - 1;

		generic_handle_irq(irq_find_mapping(
				mdss->irqcontroller.domain, hwirq));
		status &= ~(1 << hwirq);
	}

	return IRQ_HANDLED;
}
/*
* interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
* can register to get their irq's delivered
*/
#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \
MDSS_HW_INTR_STATUS_INTR_DSI0 | \
MDSS_HW_INTR_STATUS_INTR_DSI1 | \
MDSS_HW_INTR_STATUS_INTR_HDMI | \
MDSS_HW_INTR_STATUS_INTR_EDP)
/*
 * irq_chip .irq_mask: record that this sub-block irq is disabled.
 * NOTE(review): nothing in this file consumes enabled_mask (mdss_irq()
 * dispatches purely from the hw status register), so this appears to be
 * bookkeeping only — confirm against other users before relying on it.
 */
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
	struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);

	/* barriers pair with the atomic bitop to order against readers */
	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}
/*
 * irq_chip .irq_unmask: record that this sub-block irq is enabled.
 * See the NOTE on mdss_hw_mask_irq() regarding enabled_mask usage.
 */
static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
	struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);

	/* barriers pair with the atomic bitop to order against readers */
	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}
/* irq_chip backing the per-sub-block irqs mapped in mdss_hw_irqdomain_map() */
static struct irq_chip mdss_hw_irq_chip = {
	.name = "mdss",
	.irq_mask = mdss_hw_mask_irq,
	.irq_unmask = mdss_hw_unmask_irq,
};
/*
 * irqdomain .map: wire up a virq for one sub-block hwirq.  Only hwirqs
 * listed in VALID_IRQS (MDP/DSI0/DSI1/HDMI/eDP) may be mapped.
 */
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
		irq_hw_number_t hwirq)
{
	struct msm_mdss *mdss = d->host_data;

	if (!(VALID_IRQS & (1 << hwirq)))
		return -EPERM;

	irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, mdss);

	return 0;
}
static struct irq_domain_ops mdss_hw_irqdomain_ops = {
.map = mdss_hw_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
/*
 * Register the MDSS sub-block interrupt domain.  Returns 0 on success,
 * -ENXIO if the linear domain could not be created.
 */
static int mdss_irq_domain_init(struct msm_mdss *mdss)
{
	struct device *dev = mdss->dev->dev;
	struct irq_domain *domain;

	/* 32 slots, one per bit of the MDSS hw interrupt status register */
	domain = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
			mdss);
	if (!domain) {
		dev_err(dev, "mdss irq domain add failed\n");
		return -ENXIO;
	}

	mdss->irqcontroller.enabled_mask = 0;
	mdss->irqcontroller.domain = domain;

	return 0;
}
/*
 * Undo msm_mdss_init(): tear down the irq domain, drop the GDSC
 * regulator, and release the runtime-PM reference taken at init.
 * Safe to call when MDSS was never probed (priv->mdss == NULL).
 * The teardown order mirrors (reverses) the init order — keep it.
 */
void msm_mdss_destroy(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	if (!mdss)
		return;

	irq_domain_remove(mdss->irqcontroller.domain);
	mdss->irqcontroller.domain = NULL;

	regulator_disable(mdss->vdd);

	/* balance the get_sync/enable done at the end of msm_mdss_init() */
	pm_runtime_put_sync(dev->dev);

	pm_runtime_disable(dev->dev);
}
/*
 * Probe the top-level MDSS wrapper device (MDP5 only): map the MDSS and
 * VBIF register regions, enable the GDSC regulator, install the summary
 * irq handler and sub-block irq domain, and enable runtime PM.
 *
 * Returns 0 on success (including the no-op case where the device is not
 * "qcom,mdss" compatible), or a negative errno on failure.
 *
 * Fix: platform_get_irq() can fail and return a negative errno; the
 * original code passed that value straight to devm_request_irq().  Check
 * it explicitly and bail out with the proper error code.
 */
int msm_mdss_init(struct drm_device *dev)
{
	struct platform_device *pdev = dev->platformdev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_mdss *mdss;
	int irq;
	int ret;

	DBG("");

	if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
		return 0;

	mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
	if (!mdss) {
		ret = -ENOMEM;
		goto fail;
	}

	mdss->dev = dev;

	mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
	if (IS_ERR(mdss->mmio)) {
		ret = PTR_ERR(mdss->mmio);
		goto fail;
	}

	mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
	if (IS_ERR(mdss->vbif)) {
		ret = PTR_ERR(mdss->vbif);
		goto fail;
	}

	/* Regulator to enable GDSCs in downstream kernels */
	mdss->vdd = devm_regulator_get(dev->dev, "vdd");
	if (IS_ERR(mdss->vdd)) {
		ret = PTR_ERR(mdss->vdd);
		goto fail;
	}

	ret = regulator_enable(mdss->vdd);
	if (ret) {
		dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
			ret);
		goto fail;
	}

	/* platform_get_irq() returns a negative errno on failure; don't
	 * hand a bogus irq number on to devm_request_irq():
	 */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		dev_err(dev->dev, "failed to get irq: %d\n", ret);
		goto fail_irq;
	}

	ret = devm_request_irq(dev->dev, irq,
			mdss_irq, 0, "mdss_isr", mdss);
	if (ret) {
		dev_err(dev->dev, "failed to init irq: %d\n", ret);
		goto fail_irq;
	}

	ret = mdss_irq_domain_init(mdss);
	if (ret) {
		dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
		goto fail_irq;
	}

	priv->mdss = mdss;

	pm_runtime_enable(dev->dev);

	/*
	 * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
	 * domain. Remove this once runtime PM is adapted for all the devices.
	 */
	pm_runtime_get_sync(dev->dev);

	return 0;
fail_irq:
	regulator_disable(mdss->vdd);
fail:
	return ret;
}
...@@ -42,7 +42,7 @@ ...@@ -42,7 +42,7 @@
* *
* configured: * configured:
* The block is allocated to some client, and assigned to that * The block is allocated to some client, and assigned to that
* client in MDP5_MDP_SMP_ALLOC registers. * client in MDP5_SMP_ALLOC registers.
* *
* inuse: * inuse:
* The block is being actively used by a client. * The block is being actively used by a client.
...@@ -59,7 +59,7 @@ ...@@ -59,7 +59,7 @@
* mdp5_smp_commit. * mdp5_smp_commit.
* *
* 2) mdp5_smp_configure(): * 2) mdp5_smp_configure():
* As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
* are configured for the union(pending, inuse) * are configured for the union(pending, inuse)
* Current pending is copied to configured. * Current pending is copied to configured.
* It is assumed that mdp5_smp_request and mdp5_smp_configure not run * It is assumed that mdp5_smp_request and mdp5_smp_configure not run
...@@ -311,25 +311,25 @@ static void update_smp_state(struct mdp5_smp *smp, ...@@ -311,25 +311,25 @@ static void update_smp_state(struct mdp5_smp *smp,
int idx = blk / 3; int idx = blk / 3;
int fld = blk % 3; int fld = blk % 3;
val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx)); val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
switch (fld) { switch (fld) {
case 0: case 0:
val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK; val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid); val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
break; break;
case 1: case 1:
val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK; val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid); val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
break; break;
case 2: case 2:
val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK; val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid); val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
break; break;
} }
mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val); mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val); mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
} }
} }
......
This diff is collapsed.
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
struct msm_kms; struct msm_kms;
struct msm_gpu; struct msm_gpu;
struct msm_mmu; struct msm_mmu;
struct msm_mdss;
struct msm_rd_state; struct msm_rd_state;
struct msm_perf_state; struct msm_perf_state;
struct msm_gem_submit; struct msm_gem_submit;
...@@ -77,11 +78,16 @@ struct msm_vblank_ctrl { ...@@ -77,11 +78,16 @@ struct msm_vblank_ctrl {
struct msm_drm_private { struct msm_drm_private {
struct drm_device *dev;
struct msm_kms *kms; struct msm_kms *kms;
/* subordinate devices, if present: */ /* subordinate devices, if present: */
struct platform_device *gpu_pdev; struct platform_device *gpu_pdev;
/* top level MDSS wrapper device (for MDP5 only) */
struct msm_mdss *mdss;
/* possibly this should be in the kms component, but it is /* possibly this should be in the kms component, but it is
* shared by both mdp4 and mdp5.. * shared by both mdp4 and mdp5..
*/ */
...@@ -147,6 +153,9 @@ struct msm_drm_private { ...@@ -147,6 +153,9 @@ struct msm_drm_private {
struct drm_mm mm; struct drm_mm mm;
} vram; } vram;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl; struct msm_vblank_ctrl vblank_ctrl;
}; };
...@@ -165,6 +174,9 @@ void msm_gem_submit_free(struct msm_gem_submit *submit); ...@@ -165,6 +174,9 @@ void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data, int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file); struct drm_file *file);
void msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);
int msm_gem_mmap_obj(struct drm_gem_object *obj, int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma); struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
...@@ -189,8 +201,13 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, ...@@ -189,8 +201,13 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg); struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj); int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj); void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_vaddr_locked(struct drm_gem_object *obj); void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_vaddr(struct drm_gem_object *obj); void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj, int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive); struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj, void msm_gem_move_to_active(struct drm_gem_object *obj,
...@@ -257,6 +274,9 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, ...@@ -257,6 +274,9 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
} }
#endif #endif
void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
......
...@@ -49,8 +49,8 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb) ...@@ -49,8 +49,8 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
struct drm_gem_object *bo = msm_fb->planes[i]; struct drm_gem_object *bo = msm_fb->planes[i];
if (bo)
drm_gem_object_unreference_unlocked(bo); drm_gem_object_unreference_unlocked(bo);
} }
kfree(msm_fb); kfree(msm_fb);
......
...@@ -158,7 +158,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, ...@@ -158,7 +158,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr; dev->mode_config.fb_base = paddr;
fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
if (IS_ERR(fbi->screen_base)) { if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base); ret = PTR_ERR(fbi->screen_base);
goto fail_unlock; goto fail_unlock;
...@@ -251,6 +251,7 @@ void msm_fbdev_free(struct drm_device *dev) ...@@ -251,6 +251,7 @@ void msm_fbdev_free(struct drm_device *dev)
/* this will free the backing object */ /* this will free the backing object */
if (fbdev->fb) { if (fbdev->fb) {
msm_gem_put_vaddr(fbdev->bo);
drm_framebuffer_unregister_private(fbdev->fb); drm_framebuffer_unregister_private(fbdev->fb);
drm_framebuffer_remove(fbdev->fb); drm_framebuffer_remove(fbdev->fb);
} }
......
...@@ -276,6 +276,26 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) ...@@ -276,6 +276,26 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset; return offset;
} }
/*
 * Unmap this object from every MMU/address-space it was mapped into and
 * clear the cached iova, so a later map starts fresh.  Caller must hold
 * dev->struct_mutex (checked by the WARN_ON below).
 */
static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
			/* zero marks "not mapped" for the next get_iova */
			msm_obj->domain[id].iova = 0;
		}
	}
}
/* should be called under struct_mutex.. although it can be called /* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra * from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held. * iova ref if you know one is already held.
...@@ -388,7 +408,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, ...@@ -388,7 +408,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
return ret; return ret;
} }
void *msm_gem_vaddr_locked(struct drm_gem_object *obj) void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
...@@ -401,18 +421,91 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj) ...@@ -401,18 +421,91 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
if (msm_obj->vaddr == NULL) if (msm_obj->vaddr == NULL)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
msm_obj->vmap_count++;
return msm_obj->vaddr; return msm_obj->vaddr;
} }
void *msm_gem_vaddr(struct drm_gem_object *obj) void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{ {
void *ret; void *ret;
mutex_lock(&obj->dev->struct_mutex); mutex_lock(&obj->dev->struct_mutex);
ret = msm_gem_vaddr_locked(obj); ret = msm_gem_get_vaddr_locked(obj);
mutex_unlock(&obj->dev->struct_mutex); mutex_unlock(&obj->dev->struct_mutex);
return ret; return ret;
} }
/*
 * Drop one vmap reference (caller holds struct_mutex).  The actual
 * vunmap is deferred: once vmap_count reaches zero the object becomes
 * is_vunmapable() and the vmap-purge notifier calls msm_gem_vunmap().
 */
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
mutex_lock(&obj->dev->struct_mutex);
msm_gem_put_vaddr_locked(obj);
mutex_unlock(&obj->dev->struct_mutex);
}
/* Update madvise status.  Returns true if the object still has its
 * backing pages (i.e. has not been purged), false if it was purged.
 * NOTE(review): the original comment also claimed "-errno", but this
 * function has no error path — it only ever returns 0 or 1.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* serializes against the shrinker purging the object */
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	/* once purged, an object stays purged — don't overwrite that state */
	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}
/*
 * Release all backing storage of a purgeable (MADV_DONTNEED) object:
 * iova mappings, kernel vmap, pages, mmap offset, and finally the shmem
 * pages themselves.  Caller must hold struct_mutex; the object must be
 * is_purgeable() and must not be an imported dma-buf.
 * The order matters: unmap iova and vaddr before dropping the pages.
 */
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	/* invalidate any userspace mmap'ings of the now-gone pages */
	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}
/*
 * Tear down the kernel vmap of an object.  No-op if nothing is mapped;
 * warns (and bails) if there are still outstanding vmap references.
 */
void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr)
		return;

	if (WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}
/* must be called before _move_to_active().. */ /* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj, int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive) struct msm_fence_context *fctx, bool exclusive)
...@@ -464,6 +557,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj, ...@@ -464,6 +557,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool exclusive, struct fence *fence) struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu; msm_obj->gpu = gpu;
if (exclusive) if (exclusive)
reservation_object_add_excl_fence(msm_obj->resv, fence); reservation_object_add_excl_fence(msm_obj->resv, fence);
...@@ -532,13 +626,27 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) ...@@ -532,13 +626,27 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct reservation_object_list *fobj; struct reservation_object_list *fobj;
struct fence *fence; struct fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node); uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n", switch (msm_obj->madv) {
case __MSM_MADV_PURGED:
madv = " purged";
break;
case MSM_MADV_DONTNEED:
madv = " purgeable";
break;
case MSM_MADV_WILLNEED:
default:
madv = "";
break;
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, obj->refcount.refcount.counter, obj->name, obj->refcount.refcount.counter,
off, msm_obj->vaddr, obj->size); off, msm_obj->vaddr, obj->size, madv);
rcu_read_lock(); rcu_read_lock();
fobj = rcu_dereference(robj->fence); fobj = rcu_dereference(robj->fence);
...@@ -578,9 +686,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) ...@@ -578,9 +686,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
void msm_gem_free_object(struct drm_gem_object *obj) void msm_gem_free_object(struct drm_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
int id;
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); WARN_ON(!mutex_is_locked(&dev->struct_mutex));
...@@ -589,13 +695,7 @@ void msm_gem_free_object(struct drm_gem_object *obj) ...@@ -589,13 +695,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->mm_list); list_del(&msm_obj->mm_list);
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { put_iova(obj);
struct msm_mmu *mmu = priv->mmus[id];
if (mmu && msm_obj->domain[id].iova) {
uint32_t offset = msm_obj->domain[id].iova;
mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
}
}
if (obj->import_attach) { if (obj->import_attach) {
if (msm_obj->vaddr) if (msm_obj->vaddr)
...@@ -609,7 +709,7 @@ void msm_gem_free_object(struct drm_gem_object *obj) ...@@ -609,7 +709,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, msm_obj->sgt); drm_prime_gem_destroy(obj, msm_obj->sgt);
} else { } else {
vunmap(msm_obj->vaddr); msm_gem_vunmap(obj);
put_pages(obj); put_pages(obj);
} }
...@@ -688,6 +788,7 @@ static int msm_gem_new_impl(struct drm_device *dev, ...@@ -688,6 +788,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->vram_node = (void *)&msm_obj[1]; msm_obj->vram_node = (void *)&msm_obj[1];
msm_obj->flags = flags; msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
if (resv) { if (resv) {
msm_obj->resv = resv; msm_obj->resv = resv;
...@@ -729,9 +830,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, ...@@ -729,9 +830,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
return obj; return obj;
fail: fail:
if (obj) drm_gem_object_unreference(obj);
drm_gem_object_unreference(obj);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
...@@ -774,8 +873,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, ...@@ -774,8 +873,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
return obj; return obj;
fail: fail:
if (obj) drm_gem_object_unreference_unlocked(obj);
drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
...@@ -29,6 +29,16 @@ struct msm_gem_object { ...@@ -29,6 +29,16 @@ struct msm_gem_object {
uint32_t flags; uint32_t flags;
/**
* Advice: are the backing pages purgeable?
*/
uint8_t madv;
/**
* count of active vmap'ing
*/
uint8_t vmap_count;
/* And object is either: /* And object is either:
* inactive - on priv->inactive_list * inactive - on priv->inactive_list
* active - on one one of the gpu's active_list.. well, at * active - on one one of the gpu's active_list.. well, at
...@@ -72,7 +82,16 @@ static inline bool is_active(struct msm_gem_object *msm_obj) ...@@ -72,7 +82,16 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
return msm_obj->gpu != NULL; return msm_obj->gpu != NULL;
} }
#define MAX_CMDS 4 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
}
static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and * associated with the cmdstream submission for synchronization (and
...@@ -95,7 +114,7 @@ struct msm_gem_submit { ...@@ -95,7 +114,7 @@ struct msm_gem_submit {
uint32_t size; /* in dwords */ uint32_t size; /* in dwords */
uint32_t iova; uint32_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */ uint32_t idx; /* cmdstream buffer idx in bos[] */
} cmd[MAX_CMDS]; } *cmd; /* array of size nr_cmds */
struct { struct {
uint32_t flags; uint32_t flags;
struct msm_gem_object *obj; struct msm_gem_object *obj;
......
...@@ -33,12 +33,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) ...@@ -33,12 +33,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
void *msm_gem_prime_vmap(struct drm_gem_object *obj) void *msm_gem_prime_vmap(struct drm_gem_object *obj)
{ {
return msm_gem_vaddr(obj); return msm_gem_get_vaddr(obj);
} }
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{ {
/* TODO msm_gem_vunmap() */ msm_gem_put_vaddr(obj);
} }
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
......
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "msm_gem.h"
/*
 * Best-effort check whether 'task' currently owns 'mutex'.  Used so the
 * shrinker can detect being invoked from a path that already holds
 * struct_mutex, instead of deadlocking on trylock failure.
 * NOTE(review): mutex->owner is only maintained under SMP/DEBUG_MUTEXES;
 * otherwise we conservatively return false and the shrinker backs off.
 */
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
/*
 * Acquire struct_mutex for the shrinker.  Returns true when it is safe
 * to proceed; *unlock tells the caller whether it took the lock itself
 * (true) or is running under a caller that already holds it (false).
 */
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (mutex_trylock(&dev->struct_mutex)) {
		*unlock = true;
		return true;
	}

	/* lock already held — only proceed if *we* are the holder */
	if (!mutex_is_locked_by(&dev->struct_mutex, current))
		return false;

	*unlock = false;
	return true;
}
/*
 * Shrinker .count_objects: report (in pages) how much memory could be
 * reclaimed by purging all inactive, purgeable objects.
 */
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long total = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return 0;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_purgeable(msm_obj))
			total += msm_obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return total;
}
/*
 * Shrinker .scan_objects: purge inactive, purgeable objects until
 * sc->nr_to_scan pages have been freed (or the list is exhausted).
 * Returns the number of pages freed, or SHRINK_STOP if the lock could
 * not be taken.
 */
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long nr_freed = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (nr_freed >= sc->nr_to_scan)
			break;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base);
			nr_freed += msm_obj->base.size >> PAGE_SHIFT;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (nr_freed > 0)
		pr_info_ratelimited("Purging %lu bytes\n", nr_freed << PAGE_SHIFT);

	return nr_freed;
}
/*
 * vmap-purge notifier: on vmalloc-space pressure, tear down kernel
 * vmaps of inactive objects with no outstanding vmap references.
 * Adds the number of mappings released to *(unsigned long *)ptr, as
 * the vmap purge notifier chain expects.
 */
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return NOTIFY_DONE;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_vunmapable(msm_obj)) {
			msm_gem_vunmap(&msm_obj->base);
			/* since we don't know any better, lets bail after a few
			 * and if necessary the shrinker will be invoked again.
			 * Seems better than unmapping *everything*
			 */
			if (++unmapped >= 15)
				break;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		pr_info_ratelimited("Purging %u vmaps\n", unmapped);

	return NOTIFY_DONE;
}
/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker, and hooks the
 * vmap-purge notifier used to reclaim kernel vmaps under pressure.
 * (Fixed kernel-doc: parameter is @dev, not @dev_priv.)
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker and the vmap-purge
 * notifier.  (Fixed kernel-doc: parameter is @dev, not @dev_priv.)
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
	unregister_shrinker(&priv->shrinker);
}
...@@ -29,10 +29,11 @@ ...@@ -29,10 +29,11 @@
#define BO_PINNED 0x2000 #define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev, static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, int nr) struct msm_gpu *gpu, int nr_bos, int nr_cmds)
{ {
struct msm_gem_submit *submit; struct msm_gem_submit *submit;
int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0])); int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
(nr_cmds * sizeof(*submit->cmd));
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (!submit) if (!submit)
...@@ -42,6 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, ...@@ -42,6 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->gpu = gpu; submit->gpu = gpu;
submit->fence = NULL; submit->fence = NULL;
submit->pid = get_pid(task_pid(current)); submit->pid = get_pid(task_pid(current));
submit->cmd = (void *)&submit->bos[nr_bos];
/* initially, until copy_from_user() and bo lookup succeeds: */ /* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0; submit->nr_bos = 0;
...@@ -279,7 +281,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob ...@@ -279,7 +281,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably /* For now, just map the entire thing. Eventually we probably
* to do it page-by-page, w/ kmap() if not vmap()d.. * to do it page-by-page, w/ kmap() if not vmap()d..
*/ */
ptr = msm_gem_vaddr_locked(&obj->base); ptr = msm_gem_get_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) { if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr); ret = PTR_ERR(ptr);
...@@ -332,6 +334,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob ...@@ -332,6 +334,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
last_offset = off; last_offset = off;
} }
msm_gem_put_vaddr_locked(&obj->base);
return 0; return 0;
} }
...@@ -369,14 +373,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -369,14 +373,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->pipe != MSM_PIPE_3D0) if (args->pipe != MSM_PIPE_3D0)
return -EINVAL; return -EINVAL;
if (args->nr_cmds > MAX_CMDS) ret = mutex_lock_interruptible(&dev->struct_mutex);
return -EINVAL; if (ret)
return ret;
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit)
return -ENOMEM;
mutex_lock(&dev->struct_mutex); submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
goto out_unlock;
}
ret = submit_lookup_objects(submit, args, file); ret = submit_lookup_objects(submit, args, file);
if (ret) if (ret)
...@@ -462,6 +467,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -462,6 +467,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit_cleanup(submit); submit_cleanup(submit);
if (ret) if (ret)
msm_gem_submit_free(submit); msm_gem_submit_free(submit);
out_unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
} }
...@@ -59,10 +59,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, ...@@ -59,10 +59,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
return -EINVAL; return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) { for_each_sg(sgt->sgl, sg, sgt->nents, i) {
u32 pa = sg_phys(sg) - sg->offset; dma_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset; size_t bytes = sg->length + sg->offset;
VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot); ret = iommu_map(domain, da, pa, bytes, prot);
if (ret) if (ret)
...@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, ...@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes) if (unmapped < bytes)
return unmapped; return unmapped;
VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
BUG_ON(!PAGE_ALIGNED(bytes)); BUG_ON(!PAGE_ALIGNED(bytes));
......
...@@ -61,10 +61,8 @@ struct msm_kms_funcs { ...@@ -61,10 +61,8 @@ struct msm_kms_funcs {
struct msm_kms { struct msm_kms {
const struct msm_kms_funcs *funcs; const struct msm_kms_funcs *funcs;
/* irq handling: */ /* irq number to be passed on to drm_irq_install */
bool in_irq; int irq;
struct list_head irq_list; /* list of mdp4_irq */
uint32_t vblank_mask; /* irq bits set for userspace vblank */
}; };
static inline void msm_kms_init(struct msm_kms *kms, static inline void msm_kms_init(struct msm_kms *kms,
...@@ -75,5 +73,7 @@ static inline void msm_kms_init(struct msm_kms *kms, ...@@ -75,5 +73,7 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev); struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev); struct msm_kms *mdp5_kms_init(struct drm_device *dev);
int msm_mdss_init(struct drm_device *dev);
void msm_mdss_destroy(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */ #endif /* __MSM_KMS_H__ */
...@@ -132,7 +132,7 @@ static ssize_t perf_read(struct file *file, char __user *buf, ...@@ -132,7 +132,7 @@ static ssize_t perf_read(struct file *file, char __user *buf,
size_t sz, loff_t *ppos) size_t sz, loff_t *ppos)
{ {
struct msm_perf_state *perf = file->private_data; struct msm_perf_state *perf = file->private_data;
int n = 0, ret; int n = 0, ret = 0;
mutex_lock(&perf->read_lock); mutex_lock(&perf->read_lock);
...@@ -143,9 +143,10 @@ static ssize_t perf_read(struct file *file, char __user *buf, ...@@ -143,9 +143,10 @@ static ssize_t perf_read(struct file *file, char __user *buf,
} }
n = min((int)sz, perf->buftot - perf->bufpos); n = min((int)sz, perf->buftot - perf->bufpos);
ret = copy_to_user(buf, &perf->buf[perf->bufpos], n); if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
if (ret) ret = -EFAULT;
goto out; goto out;
}
perf->bufpos += n; perf->bufpos += n;
*ppos += n; *ppos += n;
......
...@@ -27,6 +27,11 @@ ...@@ -27,6 +27,11 @@
* This bypasses drm_debugfs_create_files() mainly because we need to use * This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to * our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open. * do anything if userspace doesn't have the debugfs file open.
*
* The module-param "rd_full", which defaults to false, enables snapshotting
* all (non-written) buffers in the submit, rather than just cmdstream bo's.
* This is useful to capture the contents of (for example) vbo's or textures,
* or shader programs (if not emitted inline in cmdstream).
*/ */
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
...@@ -40,6 +45,10 @@ ...@@ -40,6 +45,10 @@
#include "msm_gpu.h" #include "msm_gpu.h"
#include "msm_gem.h" #include "msm_gem.h"
static bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
enum rd_sect_type { enum rd_sect_type {
RD_NONE, RD_NONE,
RD_TEST, /* ascii text */ RD_TEST, /* ascii text */
...@@ -140,9 +149,10 @@ static ssize_t rd_read(struct file *file, char __user *buf, ...@@ -140,9 +149,10 @@ static ssize_t rd_read(struct file *file, char __user *buf,
goto out; goto out;
n = min_t(int, sz, circ_count_to_end(&rd->fifo)); n = min_t(int, sz, circ_count_to_end(&rd->fifo));
ret = copy_to_user(buf, fptr, n); if (copy_to_user(buf, fptr, n)) {
if (ret) ret = -EFAULT;
goto out; goto out;
}
fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
*ppos += n; *ppos += n;
...@@ -277,6 +287,31 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor) ...@@ -277,6 +287,31 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor)
kfree(rd); kfree(rd);
} }
static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_submit *submit, int idx,
uint32_t iova, uint32_t size)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf;
buf = msm_gem_get_vaddr_locked(&obj->base);
if (IS_ERR(buf))
return;
if (iova) {
buf += iova - submit->bos[idx].iova;
} else {
iova = submit->bos[idx].iova;
size = obj->base.size;
}
rd_write_section(rd, RD_GPUADDR,
(uint32_t[2]){ iova, size }, 8);
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr_locked(&obj->base);
}
/* called under struct_mutex */ /* called under struct_mutex */
void msm_rd_dump_submit(struct msm_gem_submit *submit) void msm_rd_dump_submit(struct msm_gem_submit *submit)
{ {
...@@ -300,27 +335,27 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) ...@@ -300,27 +335,27 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
/* could be nice to have an option (module-param?) to snapshot if (rd_full) {
* all the bo's associated with the submit. Handy to see vtx for (i = 0; i < submit->nr_bos; i++) {
* buffers, etc. For now just the cmdstream bo's is enough. /* buffers that are written to probably don't start out
*/ * with anything interesting:
*/
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
continue;
snapshot_buf(rd, submit, i, 0, 0);
}
}
for (i = 0; i < submit->nr_cmds; i++) { for (i = 0; i < submit->nr_cmds; i++) {
uint32_t idx = submit->cmd[i].idx;
uint32_t iova = submit->cmd[i].iova; uint32_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */ uint32_t szd = submit->cmd[i].size; /* in dwords */
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf = msm_gem_vaddr_locked(&obj->base);
if (IS_ERR(buf))
continue;
buf += iova - submit->bos[idx].iova; /* snapshot cmdstream bo's (if we haven't already): */
if (!rd_full) {
rd_write_section(rd, RD_GPUADDR, snapshot_buf(rd, submit, submit->cmd[i].idx,
(uint32_t[2]){ iova, szd * 4 }, 8); submit->cmd[i].iova, szd * 4);
rd_write_section(rd, RD_BUFFER_CONTENTS, }
buf, szd * 4);
switch (submit->cmd[i].type) { switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF: case MSM_SUBMIT_CMD_IB_TARGET_BUF:
......
...@@ -39,7 +39,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) ...@@ -39,7 +39,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
goto fail; goto fail;
} }
ring->start = msm_gem_vaddr_locked(ring->bo); ring->start = msm_gem_get_vaddr_locked(ring->bo);
if (IS_ERR(ring->start)) { if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start); ret = PTR_ERR(ring->start);
goto fail; goto fail;
...@@ -59,7 +59,9 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) ...@@ -59,7 +59,9 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{ {
if (ring->bo) if (ring->bo) {
msm_gem_put_vaddr(ring->bo);
drm_gem_object_unreference_unlocked(ring->bo); drm_gem_object_unreference_unlocked(ring->bo);
}
kfree(ring); kfree(ring);
} }
...@@ -201,6 +201,27 @@ struct drm_msm_wait_fence { ...@@ -201,6 +201,27 @@ struct drm_msm_wait_fence {
struct drm_msm_timespec timeout; /* in */ struct drm_msm_timespec timeout; /* in */
}; };
/* madvise provides a way to tell the kernel in case a buffers contents
* can be discarded under memory pressure, which is useful for userspace
* bo cache where we want to optimistically hold on to buffer allocate
* and potential mmap, but allow the pages to be discarded under memory
* pressure.
*
* Typical usage would involve madvise(DONTNEED) when buffer enters BO
* cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
* In the WILLNEED case, 'retained' indicates to userspace whether the
* backing pages still exist.
*/
#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
#define MSM_MADV_DONTNEED 1 /* backing pages not needed */
#define __MSM_MADV_PURGED 2 /* internal state */
struct drm_msm_gem_madvise {
__u32 handle; /* in, GEM handle */
__u32 madv; /* in, MSM_MADV_x */
__u32 retained; /* out, whether backing store still exists */
};
#define DRM_MSM_GET_PARAM 0x00 #define DRM_MSM_GET_PARAM 0x00
/* placeholder: /* placeholder:
#define DRM_MSM_SET_PARAM 0x01 #define DRM_MSM_SET_PARAM 0x01
...@@ -211,7 +232,8 @@ struct drm_msm_wait_fence { ...@@ -211,7 +232,8 @@ struct drm_msm_wait_fence {
#define DRM_MSM_GEM_CPU_FINI 0x05 #define DRM_MSM_GEM_CPU_FINI 0x05
#define DRM_MSM_GEM_SUBMIT 0x06 #define DRM_MSM_GEM_SUBMIT 0x06
#define DRM_MSM_WAIT_FENCE 0x07 #define DRM_MSM_WAIT_FENCE 0x07
#define DRM_MSM_NUM_IOCTLS 0x08 #define DRM_MSM_GEM_MADVISE 0x08
#define DRM_MSM_NUM_IOCTLS 0x09
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) #define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new) #define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
...@@ -220,6 +242,7 @@ struct drm_msm_wait_fence { ...@@ -220,6 +242,7 @@ struct drm_msm_wait_fence {
#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini) #define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit) #define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence) #define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
#if defined(__cplusplus) #if defined(__cplusplus)
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment