Commit 3eda2f59 authored by Dave Airlie

Merge branch 'linux-4.12' of git://github.com/skeggsb/linux into drm-next

A bit more for 4.12:
- GP10B support
- GP107 acceleration support

* 'linux-4.12' of git://github.com/skeggsb/linux: (23 commits)
  drm/nouveau/gpio: enable interrupts on cards with 32 gpio lines
  drm/nouveau/gr/gp107: initial support
  drm/nouveau/core: recognise GP10B chipset
  drm/nouveau/platform: support for probing GP10B
  drm/nouveau/platform: make VDD regulator optional
  drm/nouveau/gr: support for GP10B
  drm/nouveau/ibus: add GP10B support
  drm/nouveau/mc: add GP10B support
  drm/nouveau/fb: add GP10B support
  drm/nouveau/fifo: add GP10B support
  drm/nouveau/msgqueue: support for GP10B PMU firmware
  drm/nouveau/secboot: add GP10B support
  drm/nouveau/secboot/gm20b: specify MC base address as argument
  drm/nouveau/secboot: start LS firmware in post-run hook
  drm/nouveau/secboot: let LS post_run hooks return error
  drm/nouveau/secboot: pass instance to LS firmware loaders
  drm/nouveau/secboot: allow to boot multiple falcons
  drm/nouveau/imem/gk20a: Turn instmem lock into mutex
  drm/nouveau: initial support (display-only) for GP107
  drm/nouveau/kms/nv50: fix double dma_fence_put() when destroying plane state
  ...
parents fabe2be1 99a97a8b
......@@ -22,17 +22,14 @@
#ifndef __NVKM_CORE_MSGQUEUE_H
#define __NVKM_CORE_MSGQUEUE_H
#include <core/os.h>
struct nvkm_falcon;
#include <subdev/secboot.h>
struct nvkm_msgqueue;
enum nvkm_secboot_falcon;
/* Hopefully we will never have firmware arguments larger than that... */
#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
int nvkm_msgqueue_new(u32, struct nvkm_falcon *, struct nvkm_msgqueue **);
int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *,
struct nvkm_msgqueue **);
void nvkm_msgqueue_del(struct nvkm_msgqueue **);
void nvkm_msgqueue_recv(struct nvkm_msgqueue *);
int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
......@@ -41,7 +38,6 @@ int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *);
/* interface to ACR unit running on falcon (NVIDIA signed firmware) */
int nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *,
enum nvkm_secboot_falcon);
int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long);
#endif
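The single-falcon ACR boot entry point gives way to a mask-based one. A minimal caller sketch (not from this patch; queue is assumed to be a valid nvkm_msgqueue):
	unsigned long mask = BIT(NVKM_SECBOOT_FALCON_FECS) |
			     BIT(NVKM_SECBOOT_FALCON_GPCCS);
	int ret = nvkm_msgqueue_acr_boot_falcons(queue, mask);
	if (ret)
		return ret;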
......@@ -42,6 +42,10 @@ struct nvkm_device_tegra_func {
* Whether the chip requires a reference clock
*/
bool require_ref_clk;
/*
* Whether the chip requires the VDD regulator
*/
bool require_vdd;
};
int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
......
......@@ -68,4 +68,5 @@ int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
......@@ -44,4 +44,6 @@ int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
......@@ -97,6 +97,7 @@ int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
......
......@@ -7,4 +7,5 @@ int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gm200_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gp10b_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
#endif
......@@ -29,4 +29,5 @@ int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
......@@ -55,10 +55,11 @@ struct nvkm_secboot {
#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon);
int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);
int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long);
int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
#endif
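nvkm_secboot_reset() likewise now takes a falcon bitmask rather than a single enum value, so several managed falcons can be reset in one secure-boot pass. A hedged caller sketch, assuming sb points at an initialised secboot instance:
	int ret = nvkm_secboot_reset(sb, BIT(NVKM_SECBOOT_FALCON_FECS) |
					 BIT(NVKM_SECBOOT_FALCON_GPCCS));
	if (ret)
		return ret;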
......@@ -53,13 +53,21 @@ static int nouveau_platform_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_OF)
static const struct nvkm_device_tegra_func gk20a_platform_data = {
.iommu_bit = 34,
.require_vdd = true,
};
static const struct nvkm_device_tegra_func gm20b_platform_data = {
.iommu_bit = 34,
.require_vdd = true,
.require_ref_clk = true,
};
static const struct nvkm_device_tegra_func gp10b_platform_data = {
.iommu_bit = 36,
/* power provided by generic PM domains */
.require_vdd = false,
};
static const struct of_device_id nouveau_platform_match[] = {
{
.compatible = "nvidia,gk20a",
......@@ -69,6 +77,10 @@ static const struct of_device_id nouveau_platform_match[] = {
.compatible = "nvidia,gm20b",
.data = &gm20b_platform_data,
},
{
.compatible = "nvidia,gp10b",
.data = &gp10b_platform_data,
},
{ }
};
......
......@@ -1002,7 +1002,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
{
struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
__drm_atomic_helper_plane_destroy_state(&asyw->state);
dma_fence_put(asyw->state.fence);
kfree(asyw);
}
......@@ -1014,7 +1013,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
asyw->state.fence = NULL;
asyw->interval = 1;
asyw->sema = armw->sema;
asyw->ntfy = armw->ntfy;
......@@ -2043,6 +2041,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
u32 hfrontp = mode->hsync_start - mode->hdisplay;
u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
u32 blankus;
struct nv50_head_mode *m = &asyh->mode;
m->h.active = mode->htotal;
......@@ -2056,9 +2055,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
m->v.blanks = m->v.active - vfrontp - 1;
/*XXX: Safe underestimate, even "0" works */
m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
m->v.blankus *= 1000;
m->v.blankus /= mode->clock;
blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
blankus *= 1000;
blankus /= mode->clock;
m->v.blankus = blankus;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
m->v.blank2e = m->v.active + m->v.synce + vbackp;
......
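The temporary exists because the intermediate products overflow the destination field when computed in place. A worked example for a 1080p60 mode (htotal 2200, vtotal 1125, clock 148500 kHz), assuming v.blankus is a 16-bit field as in this era of nv50_head_mode:
	blankus = (1125 - 1080 - 2) * 2200;  /* 94600, already above 65535 */
	blankus *= 1000;                     /* 94600000, needs 32 bits    */
	blankus /= 148500;                   /* 637 us, fits back into u16 */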
......@@ -714,7 +714,7 @@ nv4a_chipset = {
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
.mc = nv44_mc_new,
.mmu = nv44_mmu_new,
.mmu = nv04_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
.timer = nv41_timer_new,
......@@ -2201,8 +2201,6 @@ nv132_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp102_secboot_new,
.sec2 = gp102_sec2_new,
.nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
......@@ -2215,6 +2213,8 @@ nv132_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
.nvdec = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
......@@ -2235,8 +2235,6 @@ nv134_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp102_secboot_new,
.sec2 = gp102_sec2_new,
.nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
......@@ -2249,6 +2247,8 @@ nv134_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
.nvdec = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
......@@ -2269,8 +2269,6 @@ nv136_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp102_secboot_new,
.sec2 = gp102_sec2_new,
.nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
......@@ -2283,6 +2281,65 @@ nv136_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
.nvdec = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv137_chipset = {
.name = "GP107",
.bar = gf100_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
.fb = gp102_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[0] = gp102_ce_new,
.ce[1] = gp102_ce_new,
.ce[2] = gp102_ce_new,
.ce[3] = gp102_ce_new,
.disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp107_gr_new,
.nvdec = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
.bar = gk20a_bar_new,
.bus = gf100_bus_new,
.fb = gp10b_fb_new,
.fuse = gm107_fuse_new,
.ibus = gp10b_ibus_new,
.imem = gk20a_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp10b_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp10b_secboot_new,
.pmu = gm20b_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[2] = gp102_ce_new,
.dma = gf119_dma_new,
.fifo = gp10b_fifo_new,
.gr = gp10b_gr_new,
.sw = gf100_sw_new,
};
......@@ -2724,6 +2781,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x132: device->chip = &nv132_chipset; break;
case 0x134: device->chip = &nv134_chipset; break;
case 0x136: device->chip = &nv136_chipset; break;
case 0x137: device->chip = &nv137_chipset; break;
case 0x13b: device->chip = &nv13b_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
......
......@@ -28,9 +28,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
int ret;
ret = regulator_enable(tdev->vdd);
if (ret)
goto err_power;
if (tdev->vdd) {
ret = regulator_enable(tdev->vdd);
if (ret)
goto err_power;
}
ret = clk_prepare_enable(tdev->clk);
if (ret)
......@@ -67,7 +69,8 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
err_clk_ref:
clk_disable_unprepare(tdev->clk);
err_clk:
regulator_disable(tdev->vdd);
if (tdev->vdd)
regulator_disable(tdev->vdd);
err_power:
return ret;
}
......@@ -75,6 +78,8 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
int ret;
reset_control_assert(tdev->rst);
udelay(10);
......@@ -84,7 +89,13 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
clk_disable_unprepare(tdev->clk);
udelay(10);
return regulator_disable(tdev->vdd);
if (tdev->vdd) {
ret = regulator_disable(tdev->vdd);
if (ret)
return ret;
}
return 0;
}
static void
......@@ -264,10 +275,12 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
tdev->func = func;
tdev->pdev = pdev;
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(tdev->vdd)) {
ret = PTR_ERR(tdev->vdd);
goto free;
if (func->require_vdd) {
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(tdev->vdd)) {
ret = PTR_ERR(tdev->vdd);
goto free;
}
}
tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
......
......@@ -14,6 +14,7 @@ nvkm-y += nvkm/engine/fifo/gm107.o
nvkm-y += nvkm/engine/fifo/gm200.o
nvkm-y += nvkm/engine/fifo/gm20b.o
nvkm-y += nvkm/engine/fifo/gp100.o
nvkm-y += nvkm/engine/fifo/gp10b.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
......
......@@ -83,4 +83,5 @@ extern const struct nvkm_enum gk104_fifo_fault_hubclient[];
extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
extern const struct nvkm_enum gm107_fifo_fault_engine[];
extern const struct nvkm_enum gp100_fifo_fault_engine[];
#endif
......@@ -24,7 +24,7 @@
#include "gk104.h"
#include "changk104.h"
static const struct nvkm_enum
const struct nvkm_enum
gp100_fifo_fault_engine[] = {
{ 0x01, "DISPLAY" },
{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
......
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gk104.h"
#include "changk104.h"
static const struct gk104_fifo_func
gp10b_fifo = {
.fault.engine = gp100_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
.fault.hubclient = gk104_fifo_fault_hubclient,
.fault.gpcclient = gk104_fifo_fault_gpcclient,
.chan = {
&gp100_fifo_gpfifo_oclass,
NULL
},
};
int
gp10b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
return gk104_fifo_new_(&gp10b_fifo, device, index, 512, pfifo);
}
......@@ -33,6 +33,8 @@ nvkm-y += nvkm/engine/gr/gm200.o
nvkm-y += nvkm/engine/gr/gm20b.o
nvkm-y += nvkm/engine/gr/gp100.o
nvkm-y += nvkm/engine/gr/gp102.o
nvkm-y += nvkm/engine/gr/gp107.o
nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
......@@ -52,3 +54,4 @@ nvkm-y += nvkm/engine/gr/ctxgm200.o
nvkm-y += nvkm/engine/gr/ctxgm20b.o
nvkm-y += nvkm/engine/gr/ctxgp100.o
nvkm-y += nvkm/engine/gr/ctxgp102.o
nvkm-y += nvkm/engine/gr/ctxgp107.o
......@@ -106,6 +106,9 @@ void gp100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gp100_grctx_generate_pagepool(struct gf100_grctx *);
extern const struct gf100_grctx_func gp102_grctx;
void gp102_grctx_generate_attrib(struct gf100_grctx *);
extern const struct gf100_grctx_func gp107_grctx;
/* context init value lists */
......
......@@ -29,7 +29,7 @@
* PGRAPH context implementation
******************************************************************************/
static void
void
gp102_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
......
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "ctxgf100.h"
#include <subdev/fb.h>
/*******************************************************************************
* PGRAPH context implementation
******************************************************************************/
const struct gf100_grctx_func
gp107_grctx = {
.main = gp100_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.bundle = gm107_grctx_generate_bundle,
.bundle_size = 0x3000,
.bundle_min_gpm_fifo_depth = 0x180,
.bundle_token_limit = 0x300,
.pagepool = gp100_grctx_generate_pagepool,
.pagepool_size = 0x20000,
.attrib = gp102_grctx_generate_attrib,
.attrib_nr_max = 0x15de,
.attrib_nr = 0x540,
.alpha_nr_max = 0xc00,
.alpha_nr = 0x800,
};
......@@ -1463,25 +1463,27 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_secboot *sb = device->secboot;
int ret = 0;
u32 secboot_mask = 0;
/* load fuc microcode */
nvkm_mc_unk260(device, 0);
/* securely-managed falcons must be reset using secure boot */
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
else
gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);
if (ret)
return ret;
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
else
gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);
if (ret)
return ret;
if (secboot_mask != 0) {
int ret = nvkm_secboot_reset(sb, secboot_mask);
if (ret)
return ret;
}
nvkm_mc_unk260(device, 1);
......
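Read as one piece, the post-patch flow of this hunk batches both securely-managed falcons into a single reset call; a sketch of the resulting logic, with declarations as in gf100_gr_init_ctxctl_ext():
	u32 secboot_mask = 0;

	nvkm_mc_unk260(device, 0);

	/* securely-managed falcons must be reset using secure boot */
	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
		secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
	else
		gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);

	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
		secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
	else
		gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);

	if (secboot_mask != 0) {
		int ret = nvkm_secboot_reset(sb, secboot_mask);
		if (ret)
			return ret;
	}

	nvkm_mc_unk260(device, 1);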
......@@ -125,6 +125,7 @@ struct gf100_gr_func {
void (*init_rop_active_fbps)(struct gf100_gr *);
void (*init_ppc_exceptions)(struct gf100_gr *);
void (*init_swdx_pes_mask)(struct gf100_gr *);
void (*init_num_active_ltcs)(struct gf100_gr *);
void (*set_hww_esr_report_mask)(struct gf100_gr *);
const struct gf100_gr_pack *mmio;
struct {
......@@ -301,4 +302,8 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);
void gm200_gr_init_gpc_mmu(struct gf100_gr *);
void gp100_gr_init_num_active_ltcs(struct gf100_gr *gr);
void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
#endif
......@@ -40,6 +40,15 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}
void
gp100_gr_init_num_active_ltcs(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
}
int
gp100_gr_init(struct gf100_gr *gr)
{
......@@ -81,8 +90,7 @@ gp100_gr_init(struct gf100_gr *gr)
}
nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
gr->func->init_num_active_ltcs(gr);
gr->func->init_rop_active_fbps(gr);
if (gr->func->init_swdx_pes_mask)
......@@ -154,6 +162,7 @@ gp100_gr = {
.init_gpc_mmu = gm200_gr_init_gpc_mmu,
.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
.rops = gm200_gr_rops,
.ppc_nr = 2,
.grctx = &gp100_grctx,
......
......@@ -26,7 +26,7 @@
#include <nvif/class.h>
static void
void
gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
......@@ -47,6 +47,7 @@ gp102_gr = {
.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
.init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
.rops = gm200_gr_rops,
.ppc_nr = 3,
.grctx = &gp102_grctx,
......
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
static const struct gf100_gr_func
gp107_gr = {
.init = gp100_gr_init,
.init_gpc_mmu = gm200_gr_init_gpc_mmu,
.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
.init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
.rops = gm200_gr_rops,
.ppc_nr = 1,
.grctx = &gp107_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, PASCAL_B, &gf100_fermi },
{ -1, -1, PASCAL_COMPUTE_B },
{}
}
};
int
gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
return gm200_gr_new_(&gp107_gr, device, index, pgr);
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
static void
gp10b_gr_init_num_active_ltcs(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
}
static const struct gf100_gr_func
gp10b_gr = {
.init = gp100_gr_init,
.init_gpc_mmu = gm200_gr_init_gpc_mmu,
.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.init_num_active_ltcs = gp10b_gr_init_num_active_ltcs,
.rops = gm200_gr_rops,
.ppc_nr = 1,
.grctx = &gp102_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, PASCAL_A, &gf100_fermi },
{ -1, -1, PASCAL_COMPUTE_A },
{}
}
};
int
gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
return gm200_gr_new_(&gp10b_gr, device, index, pgr);
}
......@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
}
if (type == 0x00000010) {
if (!nv31_mpeg_mthd(mpeg, mthd, data))
if (nv31_mpeg_mthd(mpeg, mthd, data))
show &= ~0x01000000;
}
}
......
......@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
}
if (type == 0x00000010) {
if (!nv44_mpeg_mthd(subdev->device, mthd, data))
if (nv44_mpeg_mthd(subdev->device, mthd, data))
show &= ~0x01000000;
}
}
......
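Both mpeg handlers flip the same test, presumably because the mthd helpers' return convention changed to true-on-handled; the intent is unchanged, suppress the status bit only when the method was actually consumed:
	if (type == 0x00000010) {
		if (nv44_mpeg_mthd(subdev->device, mthd, data)) /* true == handled */
			show &= ~0x01000000;
	}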
......@@ -463,26 +463,49 @@ nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
}
int
nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *queue, enum nvkm_secboot_falcon falcon)
nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
unsigned long falcon_mask)
{
if (!queue || !queue->func->acr_func || !queue->func->acr_func->boot_falcon)
unsigned long falcon;
if (!queue || !queue->func->acr_func)
return -ENODEV;
return queue->func->acr_func->boot_falcon(queue, falcon);
/* Does the firmware support booting multiple falcons? */
if (queue->func->acr_func->boot_multiple_falcons)
return queue->func->acr_func->boot_multiple_falcons(queue,
falcon_mask);
/* Else boot all requested falcons individually */
if (!queue->func->acr_func->boot_falcon)
return -ENODEV;
for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
int ret = queue->func->acr_func->boot_falcon(queue, falcon);
if (ret)
return ret;
}
return 0;
}
int
nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
{
const struct nvkm_subdev *subdev = falcon->owner;
int ret = -EINVAL;
switch (version) {
case 0x0137c63d:
ret = msgqueue_0137c63d_new(falcon, queue);
ret = msgqueue_0137c63d_new(falcon, sb, queue);
break;
case 0x0137bca5:
ret = msgqueue_0137bca5_new(falcon, sb, queue);
break;
case 0x0148cdec:
ret = msgqueue_0148cdec_new(falcon, queue);
ret = msgqueue_0148cdec_new(falcon, sb, queue);
break;
default:
nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
......
......@@ -101,9 +101,11 @@ struct nvkm_msgqueue_init_func {
* struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR
*
* @boot_falcon: build and send the command to reset a given falcon
* @boot_multiple_falcons: build and send the command to reset several falcons
*/
struct nvkm_msgqueue_acr_func {
int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon);
int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long);
};
struct nvkm_msgqueue_func {
......@@ -201,7 +203,11 @@ int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority,
void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *,
struct nvkm_msgqueue_queue *);
int msgqueue_0137c63d_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
int msgqueue_0148cdec_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *,
struct nvkm_msgqueue **);
int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *,
struct nvkm_msgqueue **);
int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *,
struct nvkm_msgqueue **);
#endif
......@@ -43,6 +43,15 @@ struct msgqueue_0137c63d {
#define msgqueue_0137c63d(q) \
container_of(q, struct msgqueue_0137c63d, base)
struct msgqueue_0137bca5 {
struct msgqueue_0137c63d base;
u64 wpr_addr;
};
#define msgqueue_0137bca5(q) \
container_of(container_of(q, struct msgqueue_0137c63d, base), \
struct msgqueue_0137bca5, base);
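One detail worth flagging: the trailing semicolon in the macro above is harmless in the declaration context used later in this diff, but would break the macro in expression position. A corrected sketch simply drops it:
	#define msgqueue_0137bca5(q) \
		container_of(container_of(q, struct msgqueue_0137c63d, base), \
			     struct msgqueue_0137bca5, base)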
static struct nvkm_msgqueue_queue *
msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
enum msgqueue_msg_priority priority)
......@@ -180,6 +189,7 @@ msgqueue_0137c63d_init_func = {
enum {
ACR_CMD_INIT_WPR_REGION = 0x00,
ACR_CMD_BOOTSTRAP_FALCON = 0x01,
ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
};
static void
......@@ -286,11 +296,81 @@ acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
return 0;
}
static void
acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv,
struct nvkm_msgqueue_hdr *hdr)
{
struct acr_bootstrap_falcon_msg {
struct nvkm_msgqueue_msg base;
u32 falcon_mask;
} *msg = (void *)hdr;
const struct nvkm_subdev *subdev = priv->falcon->owner;
unsigned long falcon_mask = msg->falcon_mask;
u32 falcon_id, falcon_treated = 0;
for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
nvkm_debug(subdev, "%s booted\n",
nvkm_secboot_falcon_name[falcon_id]);
falcon_treated |= BIT(falcon_id);
}
if (falcon_treated != msg->falcon_mask) {
nvkm_error(subdev, "in bootstrap falcon callback:\n");
nvkm_error(subdev, "invalid falcon mask 0x%x\n",
msg->falcon_mask);
return;
}
}
static int
acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask)
{
DECLARE_COMPLETION_ONSTACK(completed);
/*
* flags - Flag specifying RESET or no RESET.
* falcon id - Falcon id specifying falcon to bootstrap.
*/
struct {
struct nvkm_msgqueue_hdr hdr;
u8 cmd_type;
u32 flags;
u32 falcon_mask;
u32 use_va_mask;
u32 wpr_lo;
u32 wpr_hi;
} cmd;
struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
cmd.hdr.size = sizeof(cmd);
cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS;
cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
cmd.falcon_mask = falcon_mask;
cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
acr_boot_multiple_falcons_callback, &completed, true);
if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
return -ETIMEDOUT;
return 0;
}
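For reference, lower_32_bits()/upper_32_bits() split the 64-bit WPR address into the two command words; assuming wpr_addr were 0x123456789:
	cmd.wpr_lo = lower_32_bits(0x123456789ULL); /* 0x23456789 */
	cmd.wpr_hi = upper_32_bits(0x123456789ULL); /* 0x00000001 */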
static const struct nvkm_msgqueue_acr_func
msgqueue_0137c63d_acr_func = {
.boot_falcon = acr_boot_falcon,
};
static const struct nvkm_msgqueue_acr_func
msgqueue_0137bca5_acr_func = {
.boot_falcon = acr_boot_falcon,
.boot_multiple_falcons = acr_boot_multiple_falcons,
};
static void
msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
{
......@@ -307,7 +387,8 @@ msgqueue_0137c63d_func = {
};
int
msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
struct nvkm_msgqueue **queue)
{
struct msgqueue_0137c63d *ret;
......@@ -321,3 +402,35 @@ msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
return 0;
}
static const struct nvkm_msgqueue_func
msgqueue_0137bca5_func = {
.init_func = &msgqueue_0137c63d_init_func,
.acr_func = &msgqueue_0137bca5_acr_func,
.cmd_queue = msgqueue_0137c63d_cmd_queue,
.recv = msgqueue_0137c63d_process_msgs,
.dtor = msgqueue_0137c63d_dtor,
};
int
msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
struct nvkm_msgqueue **queue)
{
struct msgqueue_0137bca5 *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return -ENOMEM;
*queue = &ret->base.base;
/*
* FIXME this must be set to the address of a *GPU* mapping within the
* ACR address space!
*/
/* ret->wpr_addr = sb->wpr_addr; */
nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base);
return 0;
}
......@@ -247,7 +247,8 @@ msgqueue_0148cdec_func = {
};
int
msgqueue_0148cdec_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
struct nvkm_msgqueue **queue)
{
struct msgqueue_0148cdec *ret;
......
......@@ -28,6 +28,7 @@ nvkm-y += nvkm/subdev/fb/gm200.o
nvkm-y += nvkm/subdev/fb/gm20b.o
nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/gp10b.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
......
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
static const struct nvkm_fb_func
gp10b_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
};
int
gp10b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gp10b_fb, device, index, pfb);
}
......@@ -164,7 +164,7 @@ static int
nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_gpio *gpio = nvkm_gpio(subdev);
u32 mask = (1 << gpio->func->lines) - 1;
u32 mask = (1ULL << gpio->func->lines) - 1;
gpio->func->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
gpio->func->intr_stat(gpio, &mask, &mask);
......
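The 1ULL is the whole fix: with func->lines == 32, shifting a 32-bit 1 by 32 is undefined behaviour, while the 64-bit shift produces the intended all-ones mask:
	/* func->lines == 32:
	 *   (1    << 32) - 1   undefined (shift count equals width of int)
	 *   (1ULL << 32) - 1   0xffffffff, exactly what the u32 mask needs
	 */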
......@@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ibus/gf117.o
nvkm-y += nvkm/subdev/ibus/gk104.o
nvkm-y += nvkm/subdev/ibus/gk20a.o
nvkm-y += nvkm/subdev/ibus/gm200.o
nvkm-y += nvkm/subdev/ibus/gp10b.o
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <subdev/ibus.h>
#include "priv.h"
static int
gp10b_ibus_init(struct nvkm_subdev *ibus)
{
struct nvkm_device *device = ibus->device;
nvkm_wr32(device, 0x1200a8, 0x0);
/* init ring */
nvkm_wr32(device, 0x12004c, 0x4);
nvkm_wr32(device, 0x122204, 0x2);
nvkm_rd32(device, 0x122204);
/* timeout configuration */
nvkm_wr32(device, 0x009080, 0x800186a0);
return 0;
}
static const struct nvkm_subdev_func
gp10b_ibus = {
.init = gp10b_ibus_init,
.intr = gk104_ibus_intr,
};
int
gp10b_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&gp10b_ibus, device, index, ibus);
return 0;
}
......@@ -94,7 +94,7 @@ struct gk20a_instmem {
struct nvkm_instmem base;
/* protects vaddr_* and gk20a_instobj::vaddr* */
spinlock_t lock;
struct mutex lock;
/* CPU mappings LRU */
unsigned int vaddr_use;
......@@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
struct gk20a_instmem *imem = node->base.imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
const u64 size = nvkm_memory_size(memory);
unsigned long flags;
nvkm_ltc_flush(ltc);
spin_lock_irqsave(&imem->lock, flags);
mutex_lock(&imem->lock);
if (node->base.vaddr) {
if (!node->use_cpt) {
......@@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
out:
node->use_cpt++;
spin_unlock_irqrestore(&imem->lock, flags);
mutex_unlock(&imem->lock);
return node->base.vaddr;
}
......@@ -239,9 +238,8 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
struct gk20a_instmem *imem = node->base.imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
unsigned long flags;
spin_lock_irqsave(&imem->lock, flags);
mutex_lock(&imem->lock);
/* we should at least have one user to release... */
if (WARN_ON(node->use_cpt == 0))
......@@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
out:
spin_unlock_irqrestore(&imem->lock, flags);
mutex_unlock(&imem->lock);
wmb();
nvkm_ltc_invalidate(ltc);
......@@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
struct gk20a_instmem *imem = node->base.imem;
struct device *dev = imem->base.subdev.device->dev;
struct nvkm_mm_node *r = node->base.mem.mem;
unsigned long flags;
int i;
if (unlikely(!r))
goto out;
spin_lock_irqsave(&imem->lock, flags);
mutex_lock(&imem->lock);
/* vaddr has already been recycled */
if (node->base.vaddr)
gk20a_instobj_iommu_recycle_vaddr(node);
spin_unlock_irqrestore(&imem->lock, flags);
mutex_unlock(&imem->lock);
/* clear IOMMU bit to unmap pages */
r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
......@@ -571,7 +568,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
spin_lock_init(&imem->lock);
mutex_init(&imem->lock);
*pimem = &imem->base;
/* do not allow more than 1MB of CPU-mapped instmem */
......
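The spinlock-to-mutex conversion matters because the acquire path may sleep while building a CPU mapping of the object, which spin_lock_irqsave() forbids. The resulting pattern, sketched with a hypothetical helper name:
	mutex_lock(&imem->lock);         /* was spin_lock_irqsave() */
	if (!node->base.vaddr)
		gk20a_instobj_map(node); /* may sleep; name assumed */
	mutex_unlock(&imem->lock);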
......@@ -11,3 +11,4 @@ nvkm-y += nvkm/subdev/mc/gf100.o
nvkm-y += nvkm/subdev/mc/gk104.o
nvkm-y += nvkm/subdev/mc/gk20a.o
nvkm-y += nvkm/subdev/mc/gp100.o
nvkm-y += nvkm/subdev/mc/gp10b.o
......@@ -42,7 +42,7 @@ gp100_mc_intr_update(struct gp100_mc *mc)
}
}
static void
void
gp100_mc_intr_unarm(struct nvkm_mc *base)
{
struct gp100_mc *mc = gp100_mc(base);
......@@ -53,7 +53,7 @@ gp100_mc_intr_unarm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
static void
void
gp100_mc_intr_rearm(struct nvkm_mc *base)
{
struct gp100_mc *mc = gp100_mc(base);
......@@ -64,7 +64,7 @@ gp100_mc_intr_rearm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
static void
void
gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
{
struct gp100_mc *mc = gp100_mc(base);
......@@ -87,13 +87,14 @@ gp100_mc = {
};
int
gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
int index, struct nvkm_mc **pmc)
{
struct gp100_mc *mc;
if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
nvkm_mc_ctor(&gp100_mc, device, index, &mc->base);
nvkm_mc_ctor(func, device, index, &mc->base);
*pmc = &mc->base;
spin_lock_init(&mc->lock);
......@@ -101,3 +102,9 @@ gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
mc->mask = 0x7fffffff;
return 0;
}
int
gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
{
return gp100_mc_new_(&gp100_mc, device, index, pmc);
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
void
gp10b_mc_init(struct nvkm_mc *mc)
{
struct nvkm_device *device = mc->subdev.device;
nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */
nvkm_wr32(device, 0x00020c, 0xffffffff); /* everything out of ELPG */
}
static const struct nvkm_mc_func
gp10b_mc = {
.init = gp10b_mc_init,
.intr = gk104_mc_intr,
.intr_unarm = gp100_mc_intr_unarm,
.intr_rearm = gp100_mc_intr_rearm,
.intr_mask = gp100_mc_intr_mask,
.intr_stat = gf100_mc_intr_stat,
.reset = gk104_mc_reset,
};
int
gp10b_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
{
return gp100_mc_new_(&gp10b_mc, device, index, pmc);
}
......@@ -41,12 +41,18 @@ extern const struct nvkm_mc_map nv17_mc_reset[];
void nv44_mc_init(struct nvkm_mc *);
void nv50_mc_init(struct nvkm_mc *);
void gk104_mc_init(struct nvkm_mc *);
void gf100_mc_intr_unarm(struct nvkm_mc *);
void gf100_mc_intr_rearm(struct nvkm_mc *);
void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32);
u32 gf100_mc_intr_stat(struct nvkm_mc *);
void gf100_mc_unk260(struct nvkm_mc *, u32);
void gp100_mc_intr_unarm(struct nvkm_mc *);
void gp100_mc_intr_rearm(struct nvkm_mc *);
void gp100_mc_intr_mask(struct nvkm_mc *, u32, u32);
int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int,
struct nvkm_mc **);
extern const struct nvkm_mc_map gk104_mc_intr[];
extern const struct nvkm_mc_map gk104_mc_reset[];
......
......@@ -11,3 +11,4 @@ nvkm-y += nvkm/subdev/secboot/acr_r375.o
nvkm-y += nvkm/subdev/secboot/gm200.o
nvkm-y += nvkm/subdev/secboot/gm20b.o
nvkm-y += nvkm/subdev/secboot/gp102.o
nvkm-y += nvkm/subdev/secboot/gp10b.o
......@@ -39,8 +39,7 @@ struct nvkm_acr_func {
int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
struct nvkm_gpuobj *, u64);
int (*reset)(struct nvkm_acr *, struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long);
};
/**
......
......@@ -26,8 +26,6 @@
#include <core/gpuobj.h>
#include <core/firmware.h>
#include <engine/falcon.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/pmu.h>
#include <core/msgqueue.h>
#include <engine/sec2.h>
......@@ -241,6 +239,7 @@ struct ls_ucode_img_r352 {
*/
struct ls_ucode_img *
acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
const struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
......@@ -253,7 +252,7 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
img->base.falcon_id = falcon_id;
ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
if (ret) {
kfree(img->base.ucode_data);
......@@ -462,12 +461,14 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
* will be copied into the WPR region by the HS firmware.
*/
static int
acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
struct list_head imgs;
struct ls_ucode_img *img, *t;
unsigned long managed_falcons = acr->base.managed_falcons;
u64 wpr_addr = sb->wpr_addr;
u32 wpr_size = sb->wpr_size;
int managed_count = 0;
u32 image_wpr_size, ls_blob_size;
int falcon_id;
......@@ -479,7 +480,7 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
struct ls_ucode_img *img;
img = acr->func->ls_ucode_img_load(acr, falcon_id);
img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
if (IS_ERR(img)) {
if (acr->base.optional_falcons & BIT(falcon_id)) {
managed_falcons &= ~BIT(falcon_id);
......@@ -704,7 +705,7 @@ acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
return 0;
/* Load and prepare the managed falcon's firmwares */
ret = acr_r352_prepare_ls_blob(acr, sb->wpr_addr, sb->wpr_size);
ret = acr_r352_prepare_ls_blob(acr, sb);
if (ret)
return ret;
......@@ -882,7 +883,6 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
const struct nvkm_subdev *subdev = &sb->subdev;
unsigned long managed_falcons = acr->base.managed_falcons;
u32 reg;
int falcon_id;
int ret;
......@@ -917,54 +917,13 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
const struct acr_r352_ls_func *func =
acr->func->ls_func[falcon_id];
if (func->post_run)
func->post_run(&acr->base, sb);
}
/* Re-start ourselves if we are managed */
if (!nvkm_secboot_is_managed(sb, acr->base.boot_falcon))
return 0;
/* Enable interrupts */
nvkm_falcon_wr32(sb->boot_falcon, 0x10, 0xff);
nvkm_mc_intr_mask(subdev->device, sb->boot_falcon->owner->index, true);
/* Start LS firmware on boot falcon */
nvkm_falcon_start(sb->boot_falcon);
/*
* There is a bug where the LS firmware sometimes needs to be started
* twice (this happens only on SEC). Detect and work around that
* condition.
*
* Once started, the falcon will end up in STOPPED condition (bit 5)
* if successful, or in HALT condition (bit 4) if not.
*/
nvkm_msec(subdev->device, 1,
if ((reg = nvkm_rd32(subdev->device,
sb->boot_falcon->addr + 0x100)
& 0x30) != 0)
break;
);
if (reg & BIT(4)) {
nvkm_debug(subdev, "applying workaround for start bug...");
nvkm_falcon_start(sb->boot_falcon);
nvkm_msec(subdev->device, 1,
if ((reg = nvkm_rd32(subdev->device,
sb->boot_falcon->addr + 0x100)
& 0x30) != 0)
break;
);
if (reg & BIT(4)) {
nvkm_error(subdev, "%s failed to start\n",
nvkm_secboot_falcon_name[acr->base.boot_falcon]);
return -EINVAL;
if (func->post_run) {
ret = func->post_run(&acr->base, sb);
if (ret)
return ret;
}
}
nvkm_debug(subdev, "%s started\n",
nvkm_secboot_falcon_name[acr->base.boot_falcon]);
return 0;
}
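With the re-start workaround moved out (it reappears in acr_ls_sec2_post_run near the end of this diff), the tail of acr_r352_bootstrap() reduces to the following; a sketch of the post-patch form:
	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		const struct acr_r352_ls_func *func =
			acr->func->ls_func[falcon_id];

		if (func->post_run) {
			ret = func->post_run(&acr->base, sb);
			if (ret)
				return ret;
		}
	}

	return 0;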
......@@ -976,15 +935,16 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
*/
static int
acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon)
unsigned long falcon_mask)
{
int falcon;
int ret;
/*
* Perform secure boot each time we are called on FECS. Since only FECS
* and GPCCS are managed and started together, this ought to be safe.
*/
if (falcon != NVKM_SECBOOT_FALCON_FECS)
if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
goto end;
ret = acr_r352_shutdown(acr, sb);
......@@ -996,7 +956,9 @@ acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
return ret;
end:
acr->falcon_state[falcon] = RESET;
for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
acr->falcon_state[falcon] = RESET;
}
return 0;
}
......@@ -1009,11 +971,11 @@ acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
*/
static int
acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon)
unsigned long falcon_mask)
{
struct acr_r352 *acr = acr_r352(_acr);
struct nvkm_msgqueue *queue;
const char *fname = nvkm_secboot_falcon_name[falcon];
int falcon;
bool wpr_already_set = sb->wpr_set;
int ret;
......@@ -1026,7 +988,7 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
/* Redo secure boot entirely if it was already done */
if (wpr_already_set)
return acr_r352_reset_nopmu(acr, sb, falcon);
return acr_r352_reset_nopmu(acr, sb, falcon_mask);
/* Else return the result of the initial invocation */
else
return ret;
......@@ -1044,13 +1006,15 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
}
/* Otherwise just ask the LS firmware to reset the falcon */
nvkm_debug(&sb->subdev, "resetting %s falcon\n", fname);
ret = nvkm_msgqueue_acr_boot_falcon(queue, falcon);
for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
nvkm_debug(&sb->subdev, "resetting %s falcon\n",
nvkm_secboot_falcon_name[falcon]);
ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
if (ret) {
nvkm_error(&sb->subdev, "cannot boot %s falcon\n", fname);
nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
return ret;
}
nvkm_debug(&sb->subdev, "falcon %s reset\n", fname);
nvkm_debug(&sb->subdev, "falcon reset done\n");
return 0;
}
......
......@@ -57,11 +57,11 @@ hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
* @lhdr_flags: LS flags
*/
struct acr_r352_ls_func {
int (*load)(const struct nvkm_subdev *, struct ls_ucode_img *);
int (*load)(const struct nvkm_secboot *, struct ls_ucode_img *);
void (*generate_bl_desc)(const struct nvkm_acr *,
const struct ls_ucode_img *, u64, void *);
u32 bl_desc_size;
void (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
u32 lhdr_flags;
};
......@@ -82,6 +82,7 @@ struct acr_r352_func {
bool shadow_blob;
struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
......@@ -145,6 +146,7 @@ struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *,
enum nvkm_secboot_falcon, unsigned long);
struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
......
......@@ -107,6 +107,7 @@ struct ls_ucode_img_r367 {
struct ls_ucode_img *
acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
const struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
......@@ -119,7 +120,7 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
img->base.falcon_id = falcon_id;
ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
if (ret) {
kfree(img->base.ucode_data);
kfree(img->base.sig);
......
......@@ -28,6 +28,7 @@
void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
......
......@@ -102,15 +102,15 @@ nvkm_secboot_falcon_name[] = {
* nvkm_secboot_reset() - reset the specified falcon(s)
*/
int
nvkm_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask)
{
/* Unmanaged falcon? */
if (!(BIT(falcon) & sb->acr->managed_falcons)) {
if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) {
nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
return -EINVAL;
}
return sb->acr->func->reset(sb->acr, sb, falcon);
return sb->acr->func->reset(sb->acr, sb, falcon_mask);
}
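The new mask test rejects any request containing an unmanaged falcon; an equivalent formulation checks for stray bits directly:
	if (falcon_mask & ~sb->acr->managed_falcons)
		return -EINVAL; /* some requested falcon is not managed */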
/**
......
......@@ -41,4 +41,7 @@ void *gm200_secboot_dtor(struct nvkm_secboot *);
int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
struct nvkm_falcon *);
/* Tegra-only */
int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32);
#endif
......@@ -23,28 +23,29 @@
#include "acr.h"
#include "gm200.h"
#define TEGRA210_MC_BASE 0x70019000
#ifdef CONFIG_ARCH_TEGRA
#define TEGRA_MC_BASE 0x70019000
#define MC_SECURITY_CARVEOUT2_CFG0 0xc58
#define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c
#define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60
#define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64
#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1)
/**
* sb_tegra_read_wpr() - read the WPR registers on Tegra
* gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra
*
* On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
* is reserved from system memory by the bootloader and irreversibly locked.
* This function reads the address and size of the pre-configured WPR region.
*/
static int
gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
int
gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
{
struct nvkm_secboot *sb = &gsb->base;
void __iomem *mc;
u32 cfg;
mc = ioremap(TEGRA_MC_BASE, 0xd00);
mc = ioremap(mc_base, 0xd00);
if (!mc) {
nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
return PTR_ERR(mc);
......@@ -70,8 +71,8 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
return 0;
}
#else
static int
gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
int
gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
{
nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
return -EINVAL;
......@@ -84,7 +85,7 @@ gm20b_secboot_oneinit(struct nvkm_secboot *sb)
struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
ret = gm20b_tegra_read_wpr(gsb);
ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE);
if (ret)
return ret;
......
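One caveat in the error path above: ioremap() returns NULL on failure, not an ERR_PTR, so PTR_ERR(mc) evaluates to 0 and the caller would see success. A corrected sketch of that branch:
	mc = ioremap(mc_base, 0xd00);
	if (!mc) {
		nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
		return -ENOMEM; /* not PTR_ERR(mc), which is 0 for NULL */
	}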
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr.h"
#include "gm200.h"
#define TEGRA186_MC_BASE 0x02c10000
static int
gp10b_secboot_oneinit(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE);
if (ret)
return ret;
return gm200_secboot_oneinit(sb);
}
static const struct nvkm_secboot_func
gp10b_secboot = {
.dtor = gm200_secboot_dtor,
.oneinit = gp10b_secboot_oneinit,
.fini = gm200_secboot_fini,
.run_blob = gm200_secboot_run_blob,
};
int
gp10b_secboot_new(struct nvkm_device *device, int index,
struct nvkm_secboot **psb)
{
int ret;
struct gm200_secboot *gsb;
struct nvkm_acr *acr;
acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
BIT(NVKM_SECBOOT_FALCON_GPCCS) |
BIT(NVKM_SECBOOT_FALCON_PMU));
if (IS_ERR(acr))
return PTR_ERR(acr);
gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
psb = NULL;
return -ENOMEM;
}
*psb = &gsb->base;
ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base);
if (ret)
return ret;
return 0;
}
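A small oddity in the allocation-failure branch above: assigning NULL to the local psb parameter has no effect outside the function; clearing the caller's pointer would need the dereference:
	if (!gsb) {
		*psb = NULL; /* clear the caller's pointer, not the local */
		return -ENOMEM;
	}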
MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
......@@ -147,11 +147,11 @@ struct fw_bl_desc {
u32 data_size;
};
int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *);
int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *);
int acr_ls_ucode_load_pmu(const struct nvkm_subdev *, struct ls_ucode_img *);
void acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
int acr_ls_ucode_load_sec2(const struct nvkm_subdev *, struct ls_ucode_img *);
void acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, struct ls_ucode_img *);
int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, struct ls_ucode_img *);
int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, struct ls_ucode_img *);
int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, struct ls_ucode_img *);
int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
#endif
......@@ -144,15 +144,13 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
}
int
acr_ls_ucode_load_fecs(const struct nvkm_subdev *subdev,
struct ls_ucode_img *img)
acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
return ls_ucode_img_load_gr(subdev, img, "fecs");
return ls_ucode_img_load_gr(&sb->subdev, img, "fecs");
}
int
acr_ls_ucode_load_gpccs(const struct nvkm_subdev *subdev,
struct ls_ucode_img *img)
acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
return ls_ucode_img_load_gr(subdev, img, "gpccs");
return ls_ucode_img_load_gr(&sb->subdev, img, "gpccs");
}
......@@ -28,6 +28,8 @@
#include <core/msgqueue.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
/**
* acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw
......@@ -73,10 +75,11 @@ acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
return 0;
}
static void
static int
acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
struct nvkm_falcon *falcon, u32 addr_args)
{
struct nvkm_device *device = falcon->owner->device;
u32 cmdline_size = NVKM_MSGQUEUE_CMDLINE_SIZE;
u8 buf[cmdline_size];
......@@ -85,65 +88,118 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
nvkm_falcon_load_dmem(falcon, buf, addr_args, cmdline_size, 0);
/* rearm the queue so it will wait for the init message */
nvkm_msgqueue_reinit(queue);
/* Enable interrupts */
nvkm_falcon_wr32(falcon, 0x10, 0xff);
nvkm_mc_intr_mask(device, falcon->owner->index, true);
/* Start LS firmware on boot falcon */
nvkm_falcon_start(falcon);
return 0;
}
int
acr_ls_ucode_load_pmu(const struct nvkm_subdev *subdev,
struct ls_ucode_img *img)
acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
struct nvkm_pmu *pmu = subdev->device->pmu;
struct nvkm_pmu *pmu = sb->subdev.device->pmu;
int ret;
ret = acr_ls_ucode_load_msgqueue(subdev, "pmu", img);
ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", img);
if (ret)
return ret;
/* Allocate the PMU queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
&pmu->queue);
sb, &pmu->queue);
if (ret)
return ret;
return 0;
}
void
int
acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
{
struct nvkm_device *device = sb->subdev.device;
struct nvkm_pmu *pmu = device->pmu;
u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
int ret;
ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
if (ret)
return ret;
acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
nvkm_debug(&sb->subdev, "%s started\n",
nvkm_secboot_falcon_name[acr->boot_falcon]);
return 0;
}
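The hunk above interleaves the removed unchecked call with its checked replacement; the post-patch body, as a sketch:
	ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
	if (ret)
		return ret;

	nvkm_debug(&sb->subdev, "%s started\n",
		   nvkm_secboot_falcon_name[acr->boot_falcon]);
	return 0;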
int
acr_ls_ucode_load_sec2(const struct nvkm_subdev *subdev,
struct ls_ucode_img *img)
acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
struct nvkm_sec2 *sec = subdev->device->sec2;
struct nvkm_sec2 *sec = sb->subdev.device->sec2;
int ret;
ret = acr_ls_ucode_load_msgqueue(subdev, "sec2", img);
ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", img);
if (ret)
return ret;
/* Allocate the SEC2 queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
&sec->queue);
sb, &sec->queue);
if (ret)
return ret;
return 0;
}
void
int
acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
{
struct nvkm_device *device = sb->subdev.device;
const struct nvkm_subdev *subdev = &sb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_sec2 *sec = device->sec2;
/* on SEC arguments are always at the beginning of EMEM */
u32 addr_args = 0x01000000;
const u32 addr_args = 0x01000000;
u32 reg;
int ret;
acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
if (ret)
return ret;
/*
* There is a bug where the LS firmware sometimes needs to be started
* twice (this happens only on SEC). Detect and work around that
* condition.
*
* Once started, the falcon will end up in STOPPED condition (bit 5)
* if successful, or in HALT condition (bit 4) if not.
*/
nvkm_msec(device, 1,
if ((reg = nvkm_falcon_rd32(sb->boot_falcon, 0x100) & 0x30) != 0)
break;
);
if (reg & BIT(4)) {
nvkm_debug(subdev, "applying workaround for start bug...");
nvkm_falcon_start(sb->boot_falcon);
nvkm_msec(subdev->device, 1,
if ((reg = nvkm_rd32(subdev->device,
sb->boot_falcon->addr + 0x100)
& 0x30) != 0)
break;
);
if (reg & BIT(4)) {
nvkm_error(subdev, "%s failed to start\n",
nvkm_secboot_falcon_name[acr->boot_falcon]);
return -EINVAL;
}
}
nvkm_debug(&sb->subdev, "%s started\n",
nvkm_secboot_falcon_name[acr->boot_falcon]);
return 0;
}