Commit 6796b129 authored by Dave Airlie

Merge branch 'linux-4.12' of git://github.com/skeggsb/linux into drm-next

- Re-architecture of the code that handles proprietary firmware, more
  abstracted to support the multitude of differences that NVIDIA introduces
- Support in said code for GP10x ACR and GR firmware, giving acceleration
  support \o/
- Fix for GTX 970 GPUs that are in an odd MMU configuration

* 'linux-4.12' of git://github.com/skeggsb/linux: (60 commits)
  drm/nouveau/fb/gf100-: rework ram detection
  drm/nouveau/fb/gm200: split ram implementation from gm107
  drm/nouveau/fb/gf108: split implementation from gf100
  drm/nouveau/fb/gf100-: modify constructors to allow more customisation
  drm/nouveau/kms/nv50: use drm core i2c-over-aux algorithm
  drm/nouveau/i2c/g94-: return REPLY_M value on reads
  drm/nouveau/i2c: modify aux interface to return length actually transferred
  drm/nouveau/gp10x: enable secboot and GR
  drm/nouveau/gr/gp102: initial support
  drm/nouveau/falcon: support for gp10x msgqueue
  drm/nouveau/secboot: add gp102/gp104/gp106/gp107 support
  drm/nouveau/secboot: put HS code loading code into own file
  drm/nouveau/secboot: support for r375 ACR
  drm/nouveau/secboot: support for r367 ACR
  drm/nouveau/secboot: support for r364 ACR
  drm/nouveau/secboot: workaround bug when starting SEC2 firmware
  drm/nouveau/secboot: support standard NVIDIA HS binaries
  drm/nouveau/secboot: support for unload blob bootloader
  drm/nouveau/secboot: let callers interpret return value of blobs
  drm/nouveau/secboot: support for different load and unload falcons
  ...
parents 2e161017 97e5268d
@@ -125,6 +125,7 @@
 #define MAXWELL_B /* cl9097.h */ 0x0000b197
 #define PASCAL_A /* cl9097.h */ 0x0000c097
+#define PASCAL_B /* cl9097.h */ 0x0000c197
 #define NV74_BSP 0x000074b0
@@ -163,6 +164,7 @@
 #define MAXWELL_COMPUTE_A 0x0000b0c0
 #define MAXWELL_COMPUTE_B 0x0000b1c0
 #define PASCAL_COMPUTE_A 0x0000c0c0
+#define PASCAL_COMPUTE_B 0x0000c1c0
 #define NV74_CIPHER 0x000074c1
 #endif
@@ -59,6 +59,7 @@ enum nvkm_devidx {
 	NVKM_ENGINE_NVDEC,
 	NVKM_ENGINE_PM,
 	NVKM_ENGINE_SEC,
+	NVKM_ENGINE_SEC2,
 	NVKM_ENGINE_SW,
 	NVKM_ENGINE_VIC,
 	NVKM_ENGINE_VP,
@@ -155,9 +156,10 @@ struct nvkm_device {
 	struct nvkm_engine *msppp;
 	struct nvkm_engine *msvld;
 	struct nvkm_engine *nvenc[3];
-	struct nvkm_engine *nvdec;
+	struct nvkm_nvdec *nvdec;
 	struct nvkm_pm *pm;
 	struct nvkm_engine *sec;
+	struct nvkm_sec2 *sec2;
 	struct nvkm_sw *sw;
 	struct nvkm_engine *vic;
 	struct nvkm_engine *vp;
@@ -225,9 +227,10 @@ struct nvkm_device_chip {
 	int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_nvdec **);
 	int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
 	int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*sec2 )(struct nvkm_device *, int idx, struct nvkm_sec2 **);
 	int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **);
 	int (*vic )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*vp )(struct nvkm_device *, int idx, struct nvkm_engine **);
...
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_CORE_MSGQUEUE_H
#define __NVKM_CORE_MSGQUEUE_H
#include <core/os.h>
struct nvkm_falcon;
struct nvkm_msgqueue;
enum nvkm_secboot_falcon;
/* Hopefully we will never have firmware arguments larger than that... */
#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
int nvkm_msgqueue_new(u32, struct nvkm_falcon *, struct nvkm_msgqueue **);
void nvkm_msgqueue_del(struct nvkm_msgqueue **);
void nvkm_msgqueue_recv(struct nvkm_msgqueue *);
int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
/* useful if we run an NVIDIA-signed firmware */
void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *);
/* interface to ACR unit running on falcon (NVIDIA signed firmware) */
int nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *,
enum nvkm_secboot_falcon);
#endif
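To make the interface above concrete: a minimal sketch of the expected call order, assuming a falcon whose signed firmware has already been loaded. The helper name, firmware version argument and boot target are illustrative, not part of this header.

/* Illustrative sketch only -- not part of this commit. */
static int
example_msgqueue_boot(struct nvkm_falcon *falcon, u32 fw_version,
		      struct nvkm_msgqueue **queue)
{
	u8 cmdline[NVKM_MSGQUEUE_CMDLINE_SIZE] = {};
	int ret;
	/* select the msgqueue implementation matching the firmware */
	ret = nvkm_msgqueue_new(fw_version, falcon, queue);
	if (ret)
		return ret;
	/* hand the firmware its boot arguments before starting it */
	nvkm_msgqueue_write_cmdline(*queue, cmdline);
	/* ... start the falcon; its INIT message is then handled from
	 * the IRQ/work path via nvkm_msgqueue_recv() ... */
	/* once initialized, ask the ACR unit to boot another falcon
	 * (enum value from <subdev/secboot.h>) */
	return nvkm_msgqueue_acr_boot_falcon(*queue, NVKM_SECBOOT_FALCON_FECS);
}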
@@ -10,6 +10,7 @@ enum nvkm_falcon_dmaidx {
 	FALCON_DMAIDX_PHYS_VID = 2,
 	FALCON_DMAIDX_PHYS_SYS_COH = 3,
 	FALCON_DMAIDX_PHYS_SYS_NCOH = 4,
+	FALCON_SEC2_DMAIDX_UCODE = 6,
 };
 struct nvkm_falcon {
@@ -19,11 +20,13 @@ struct nvkm_falcon {
 	u32 addr;
 	struct mutex mutex;
+	struct mutex dmem_mutex;
 	const struct nvkm_subdev *user;
 	u8 version;
 	u8 secret;
 	bool debug;
+	bool has_emem;
 	struct nvkm_memory *core;
 	bool external;
@@ -45,8 +48,14 @@ struct nvkm_falcon {
 	struct nvkm_engine engine;
 };
+/* This constructor must be called from the owner's oneinit() hook and
+ * *not* its constructor. This is to ensure that DEVINIT has been
+ * completed, and that the device is correctly enabled before we touch
+ * falcon registers.
+ */
 int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
		       struct nvkm_falcon **);
 void nvkm_falcon_del(struct nvkm_falcon **);
 int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
 void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
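As a concrete illustration of the comment above: a hedged sketch of an engine constructing its falcon from its oneinit() hook and tearing it down in its destructor, mirroring the nvdec/sec2 pattern elsewhere in this merge. The engine type, name and register base are hypothetical.

/* Illustrative sketch only -- not part of this commit. */
struct example_engine {
	struct nvkm_engine engine;
	struct nvkm_falcon *falcon;
};
#define example_engine(p) container_of((p), struct example_engine, engine)
static int
example_engine_oneinit(struct nvkm_engine *engine)
{
	struct example_engine *ex = example_engine(engine);
	/* safe here: DEVINIT has completed before oneinit() runs */
	return nvkm_falcon_v1_new(&engine->subdev, "EXAMPLE", 0x84000,
				  &ex->falcon);
}
static void *
example_engine_dtor(struct nvkm_engine *engine)
{
	struct example_engine *ex = example_engine(engine);
	nvkm_falcon_del(&ex->falcon);
	return ex;
}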
...
@@ -43,4 +43,5 @@ int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
 #ifndef __NVKM_NVDEC_H__
 #define __NVKM_NVDEC_H__
+#define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
 #include <core/engine.h>
+struct nvkm_nvdec {
+	struct nvkm_engine engine;
+	struct nvkm_falcon *falcon;
+};
+int gp102_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
 #endif
#ifndef __NVKM_SEC2_H__
#define __NVKM_SEC2_H__
#include <core/engine.h>
struct nvkm_sec2 {
struct nvkm_engine engine;
struct nvkm_falcon *falcon;
struct nvkm_msgqueue *queue;
struct work_struct work;
};
int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
#endif
@@ -89,6 +89,7 @@ int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
@@ -146,6 +147,12 @@ struct nvkm_ram {
 };
 struct nvkm_ram_func {
+	u64 upper;
+	u32 (*probe_fbp)(const struct nvkm_ram_func *, struct nvkm_device *,
+			 int fbp, int *pltcs);
+	u32 (*probe_fbp_amount)(const struct nvkm_ram_func *, u32 fbpao,
+				struct nvkm_device *, int fbp, int *pltcs);
+	u32 (*probe_fbpa_amount)(struct nvkm_device *, int fbpa);
 	void *(*dtor)(struct nvkm_ram *);
 	int (*init)(struct nvkm_ram *);
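To make the new detection hooks concrete: a hedged sketch of how a probe loop might combine them, summing what each FBP reports. The helper, the per-FBP unit (MiB is assumed) and the loop policy are illustrative; the actual gf100 rework in this merge is more involved, since it must cope with unevenly populated FBPs such as the odd GTX 970 configuration.

/* Illustrative sketch only -- not part of this commit. */
static u64
example_ram_total(const struct nvkm_ram_func *func,
		  struct nvkm_device *device, int fbp_nr)
{
	u64 total = 0;
	int fbp, ltcs;
	for (fbp = 0; fbp < fbp_nr; fbp++) {
		/* each FBP reports its usable amount (assumed MiB) and
		 * how many LTCs serve it */
		total += func->probe_fbp(func, device, fbp, &ltcs);
	}
	return total << 20; /* assumed MiB-to-bytes conversion */
}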
...
@@ -64,7 +64,7 @@ void nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *, bool monitor);
 int nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *);
 void nvkm_i2c_aux_release(struct nvkm_i2c_aux *);
 int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
-		      u32 addr, u8 *data, u8 size);
+		      u32 addr, u8 *data, u8 *size);
 int nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *, int link_nr, int link_bw,
			 bool enhanced_framing);
@@ -162,9 +162,11 @@ nvkm_probe_i2c(struct i2c_adapter *adap, u8 addr)
 static inline int
 nvkm_rdaux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
 {
+	const u8 xfer = size;
 	int ret = nvkm_i2c_aux_acquire(aux);
 	if (ret == 0) {
-		ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, size);
+		ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, &size);
+		WARN_ON(!ret && size != xfer);
		nvkm_i2c_aux_release(aux);
	}
	return ret;
@@ -175,7 +177,7 @@ nvkm_wraux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
 {
	int ret = nvkm_i2c_aux_acquire(aux);
	if (ret == 0) {
-		ret = nvkm_i2c_aux_xfer(aux, true, 8, addr, data, size);
+		ret = nvkm_i2c_aux_xfer(aux, true, 8, addr, data, &size);
		nvkm_i2c_aux_release(aux);
	}
	return ret;
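With this change, callers pass the transfer size by pointer and read back how many bytes actually moved, as the WARN_ON() in nvkm_rdaux() above shows. Here is a hedged sketch of a caller that tolerates short AUX replies instead of warning; the helper and retry policy are illustrative, and the channel is assumed to have been acquired already.

/* Illustrative sketch only -- not part of this commit. */
static int
example_read_dpcd(struct nvkm_i2c_aux *aux, u32 addr, u8 *buf, u8 want)
{
	u8 done = 0;
	while (done < want) {
		u8 size = want - done;
		/* type 9 = AUX read, as in nvkm_rdaux() above */
		int ret = nvkm_i2c_aux_xfer(aux, true, 9, addr + done,
					    buf + done, &size);
		if (ret < 0)
			return ret;
		if (size == 0)
			return -EIO; /* no forward progress */
		done += size; /* size now holds bytes transferred */
	}
	return 0;
}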
...
@@ -7,6 +7,7 @@ struct nvkm_pmu {
 	const struct nvkm_pmu_func *func;
 	struct nvkm_subdev subdev;
 	struct nvkm_falcon *falcon;
+	struct nvkm_msgqueue *queue;
 	struct {
 		u32 base;
...
@@ -30,10 +30,13 @@ enum nvkm_secboot_falcon {
 	NVKM_SECBOOT_FALCON_RESERVED = 1,
 	NVKM_SECBOOT_FALCON_FECS = 2,
 	NVKM_SECBOOT_FALCON_GPCCS = 3,
-	NVKM_SECBOOT_FALCON_END = 4,
+	NVKM_SECBOOT_FALCON_SEC2 = 7,
+	NVKM_SECBOOT_FALCON_END = 8,
 	NVKM_SECBOOT_FALCON_INVALID = 0xffffffff,
 };
+extern const char *nvkm_secboot_falcon_name[];
 /**
  * @wpr_set: whether the WPR region is currently set
  */
@@ -42,6 +45,7 @@ struct nvkm_secboot {
 	struct nvkm_acr *acr;
 	struct nvkm_subdev subdev;
 	struct nvkm_falcon *boot_falcon;
+	struct nvkm_falcon *halt_falcon;
 	u64 wpr_addr;
 	u32 wpr_size;
@@ -55,5 +59,6 @@ int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);
 int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 #endif
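For context on the widened falcon IDs above: a hedged sketch of how a caller uses them with nvkm_secboot_reset(); the helper is hypothetical.

/* Illustrative sketch only -- not part of this commit. */
static int
example_reset_gr_falcons(struct nvkm_secboot *sb)
{
	int ret;
	/* ACR re-loads and re-boots the LS falcons inside the WPR region */
	ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
	if (ret)
		return ret;
	return nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
}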
@@ -1147,6 +1147,7 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
 		container_of(obj, typeof(*nv_connector), aux);
 	struct nouveau_encoder *nv_encoder;
 	struct nvkm_i2c_aux *aux;
+	u8 size = msg->size;
 	int ret;
 	nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
@@ -1162,11 +1163,11 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
 		return ret;
 	ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
-				msg->buffer, msg->size);
+				msg->buffer, &size);
 	nvkm_i2c_aux_release(aux);
 	if (ret >= 0) {
 		msg->reply = ret;
-		return msg->size;
+		return size;
 	}
 	return ret;
...
@@ -3627,7 +3627,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		struct nvkm_i2c_aux *aux =
 			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
 		if (aux) {
-			nv_encoder->i2c = &aux->i2c;
+			nv_encoder->i2c = &nv_connector->aux.ddc;
 			nv_encoder->aux = aux;
 		}
@@ -3777,6 +3777,7 @@ nv50_pior_func = {
 static int
 nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nvkm_i2c_bus *bus = NULL;
@@ -3794,7 +3795,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		break;
 	case DCB_OUTPUT_DP:
 		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
-		ddc = aux ? &aux->i2c : NULL;
+		ddc = aux ? &nv_connector->aux.ddc : NULL;
 		type = DRM_MODE_ENCODER_TMDS;
 		break;
 	default:
...
@@ -78,6 +78,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
 	[NVKM_ENGINE_NVDEC ] = "nvdec",
 	[NVKM_ENGINE_PM ] = "pm",
 	[NVKM_ENGINE_SEC ] = "sec",
+	[NVKM_ENGINE_SEC2 ] = "sec2",
 	[NVKM_ENGINE_SW ] = "sw",
 	[NVKM_ENGINE_VIC ] = "vic",
 	[NVKM_ENGINE_VP ] = "vp",
...
@@ -18,6 +18,7 @@ include $(src)/nvkm/engine/nvenc/Kbuild
 include $(src)/nvkm/engine/nvdec/Kbuild
 include $(src)/nvkm/engine/pm/Kbuild
 include $(src)/nvkm/engine/sec/Kbuild
+include $(src)/nvkm/engine/sec2/Kbuild
 include $(src)/nvkm/engine/sw/Kbuild
 include $(src)/nvkm/engine/vic/Kbuild
 include $(src)/nvkm/engine/vp/Kbuild
@@ -1379,7 +1379,7 @@ nvc1_chipset = {
 	.bus = gf100_bus_new,
 	.clk = gf100_clk_new,
 	.devinit = gf100_devinit_new,
-	.fb = gf100_fb_new,
+	.fb = gf108_fb_new,
 	.fuse = gf100_fuse_new,
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
@@ -2200,6 +2200,9 @@ nv132_chipset = {
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gf100_mmu_new,
+	.secboot = gp102_secboot_new,
+	.sec2 = gp102_sec2_new,
+	.nvdec = gp102_nvdec_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2211,6 +2214,8 @@ nv132_chipset = {
 	.disp = gp102_disp_new,
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
+	.gr = gp102_gr_new,
+	.sw = gf100_sw_new,
 };
 static const struct nvkm_device_chip
@@ -2229,6 +2234,9 @@ nv134_chipset = {
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gf100_mmu_new,
+	.secboot = gp102_secboot_new,
+	.sec2 = gp102_sec2_new,
+	.nvdec = gp102_nvdec_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2240,6 +2248,8 @@ nv134_chipset = {
 	.disp = gp102_disp_new,
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
+	.gr = gp102_gr_new,
+	.sw = gf100_sw_new,
 };
 static const struct nvkm_device_chip
@@ -2258,6 +2268,9 @@ nv136_chipset = {
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gf100_mmu_new,
+	.secboot = gp102_secboot_new,
+	.sec2 = gp102_sec2_new,
+	.nvdec = gp102_nvdec_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2269,6 +2282,8 @@ nv136_chipset = {
 	.disp = gp102_disp_new,
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
+	.gr = gp102_gr_new,
+	.sw = gf100_sw_new,
 };
 static int
@@ -2362,9 +2377,10 @@ nvkm_device_engine(struct nvkm_device *device, int index)
 	_(NVENC0 , device->nvenc[0], device->nvenc[0]);
 	_(NVENC1 , device->nvenc[1], device->nvenc[1]);
 	_(NVENC2 , device->nvenc[2], device->nvenc[2]);
-	_(NVDEC  , device->nvdec   , device->nvdec);
+	_(NVDEC  , device->nvdec   , &device->nvdec->engine);
 	_(PM     , device->pm      , &device->pm->engine);
 	_(SEC    , device->sec     , device->sec);
+	_(SEC2   , device->sec2    , &device->sec2->engine);
 	_(SW     , device->sw      , &device->sw->engine);
 	_(VIC    , device->vic     , device->vic);
 	_(VP     , device->vp      , device->vp);
@@ -2812,6 +2828,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	_(NVKM_ENGINE_NVDEC , nvdec);
 	_(NVKM_ENGINE_PM    , pm);
 	_(NVKM_ENGINE_SEC   , sec);
+	_(NVKM_ENGINE_SEC2  , sec2);
 	_(NVKM_ENGINE_SW    , sw);
 	_(NVKM_ENGINE_VIC   , vic);
 	_(NVKM_ENGINE_VP    , vp);
...
@@ -41,6 +41,7 @@
 #include <engine/nvdec.h>
 #include <engine/pm.h>
 #include <engine/sec.h>
+#include <engine/sec2.h>
 #include <engine/sw.h>
 #include <engine/vic.h>
 #include <engine/vp.h>
...
@@ -32,6 +32,7 @@ nvkm-y += nvkm/engine/gr/gm107.o
 nvkm-y += nvkm/engine/gr/gm200.o
 nvkm-y += nvkm/engine/gr/gm20b.o
 nvkm-y += nvkm/engine/gr/gp100.o
+nvkm-y += nvkm/engine/gr/gp102.o
 nvkm-y += nvkm/engine/gr/ctxnv40.o
 nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -50,3 +51,4 @@ nvkm-y += nvkm/engine/gr/ctxgm107.o
 nvkm-y += nvkm/engine/gr/ctxgm200.o
 nvkm-y += nvkm/engine/gr/ctxgm20b.o
 nvkm-y += nvkm/engine/gr/ctxgp100.o
+nvkm-y += nvkm/engine/gr/ctxgp102.o
@@ -102,6 +102,10 @@ void gm200_grctx_generate_405b60(struct gf100_gr *);
 extern const struct gf100_grctx_func gm20b_grctx;
 extern const struct gf100_grctx_func gp100_grctx;
+void gp100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
+void gp100_grctx_generate_pagepool(struct gf100_grctx *);
+extern const struct gf100_grctx_func gp102_grctx;
 /* context init value lists */
...
@@ -29,7 +29,7 @@
  * PGRAPH context implementation
  ******************************************************************************/
-static void
+void
 gp100_grctx_generate_pagepool(struct gf100_grctx *info)
 {
 	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
@@ -123,7 +123,7 @@ gp100_grctx_generate_405b60(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
 }
-static void
+void
 gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
...
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "ctxgf100.h"
#include <subdev/fb.h>
/*******************************************************************************
* PGRAPH context implementation
******************************************************************************/
static void
gp102_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 alpha = grctx->alpha_nr;
const u32 attrib = grctx->attrib_nr;
const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
const int b = mmio_vram(info, size, (1 << s), access);
const int max_batches = 0xffff;
u32 ao = 0;
u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
int gpc, ppc, n = 0;
mmio_refn(info, 0x418810, 0x80000000, s, b);
mmio_refn(info, 0x419848, 0x10000000, s, b);
mmio_refn(info, 0x419c2c, 0x10000000, s, b);
mmio_refn(info, 0x419b00, 0x00000000, s, b);
mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7);
mmio_wr32(info, 0x405830, attrib);
mmio_wr32(info, 0x40585c, alpha);
mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
const u32 u = 0x418ea0 + (n * 0x04);
const u32 o = PPC_UNIT(gpc, ppc, 0);
const u32 p = GPC_UNIT(gpc, 0xc44 + (ppc * 4));
if (!(gr->ppc_mask[gpc] & (1 << ppc)))
continue;
mmio_wr32(info, o + 0xc0, bs);
mmio_wr32(info, p, bs);
mmio_wr32(info, o + 0xf4, bo);
mmio_wr32(info, o + 0xf0, bs);
bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, o + 0xe4, as);
mmio_wr32(info, o + 0xf8, ao);
ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, u, bs);
}
}
mmio_wr32(info, 0x4181e4, 0x00000100);
mmio_wr32(info, 0x41befc, 0x00000100);
}
const struct gf100_grctx_func
gp102_grctx = {
.main = gp100_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.bundle = gm107_grctx_generate_bundle,
.bundle_size = 0x3000,
.bundle_min_gpm_fifo_depth = 0x180,
.bundle_token_limit = 0x900,
.pagepool = gp100_grctx_generate_pagepool,
.pagepool_size = 0x20000,
.attrib = gp102_grctx_generate_attrib,
.attrib_nr_max = 0x5d4,
.attrib_nr = 0x320,
.alpha_nr_max = 0xc00,
.alpha_nr = 0x800,
};
@@ -1647,8 +1647,18 @@ static int
 gf100_gr_oneinit(struct nvkm_gr *base)
 {
 	struct gf100_gr *gr = gf100_gr(base);
-	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int i, j;
+	int ret;
+	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
+	if (ret)
+		return ret;
+	ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
+	if (ret)
+		return ret;
 	nvkm_pmu_pgob(device->pmu, false);
@@ -1856,24 +1866,13 @@ int
 gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
	      int index, struct gf100_gr *gr)
 {
-	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	int ret;
 	gr->func = func;
 	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
				    func->fecs.ucode == NULL);
-	ret = nvkm_gr_ctor(&gf100_gr_, device, index,
-			   gr->firmware || func->fecs.ucode != NULL,
-			   &gr->base);
-	if (ret)
-		return ret;
-	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
-	if (ret)
-		return ret;
-	return nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
+	return nvkm_gr_ctor(&gf100_gr_, device, index,
+			    gr->firmware || func->fecs.ucode != NULL,
+			    &gr->base);
 }
 int
...
@@ -124,6 +124,7 @@ struct gf100_gr_func {
 	void (*init_gpc_mmu)(struct gf100_gr *);
 	void (*init_rop_active_fbps)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
+	void (*init_swdx_pes_mask)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
 	struct {
@@ -150,6 +151,9 @@ int gk20a_gr_init(struct gf100_gr *);
 int gm200_gr_init(struct gf100_gr *);
 int gm200_gr_rops(struct gf100_gr *);
+int gp100_gr_init(struct gf100_gr *);
+void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
 #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
 struct gf100_gr_chan {
...
@@ -30,7 +30,7 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
-static void
+void
 gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -40,7 +40,7 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
 	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
-static int
+int
 gp100_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -85,6 +85,8 @@ gp100_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
 	gr->func->init_rop_active_fbps(gr);
+	if (gr->func->init_swdx_pes_mask)
+		gr->func->init_swdx_pes_mask(gr);
 	nvkm_wr32(device, 0x400500, 0x00010001);
 	nvkm_wr32(device, 0x400100, 0xffffffff);
...
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
static void
gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
u32 mask = 0, data, gpc;
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
data = nvkm_rd32(device, GPC_UNIT(gpc, 0x0c50)) & 0x0000000f;
mask |= data << (gpc * 4);
}
nvkm_wr32(device, 0x4181d0, mask);
}
static const struct gf100_gr_func
gp102_gr = {
.init = gp100_gr_init,
.init_gpc_mmu = gm200_gr_init_gpc_mmu,
.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
.rops = gm200_gr_rops,
.ppc_nr = 3,
.grctx = &gp102_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, PASCAL_B, &gf100_fermi },
{ -1, -1, PASCAL_COMPUTE_B },
{}
}
};
int
gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
return gm200_gr_new_(&gp102_gr, device, index, pgr);
}
-#nvkm-y += nvkm/engine/nvdec/base.o
+nvkm-y += nvkm/engine/nvdec/base.o
+nvkm-y += nvkm/engine/nvdec/gp102.o
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <engine/falcon.h>
static int
nvkm_nvdec_oneinit(struct nvkm_engine *engine)
{
struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
return nvkm_falcon_v1_new(&nvdec->engine.subdev, "NVDEC", 0x84000,
&nvdec->falcon);
}
static void *
nvkm_nvdec_dtor(struct nvkm_engine *engine)
{
struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
nvkm_falcon_del(&nvdec->falcon);
return nvdec;
}
static const struct nvkm_engine_func
nvkm_nvdec = {
.dtor = nvkm_nvdec_dtor,
.oneinit = nvkm_nvdec_oneinit,
};
int
nvkm_nvdec_new_(struct nvkm_device *device, int index,
struct nvkm_nvdec **pnvdec)
{
struct nvkm_nvdec *nvdec;
if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL)))
return -ENOMEM;
return nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
&nvdec->engine);
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
int
gp102_nvdec_new(struct nvkm_device *device, int index,
struct nvkm_nvdec **pnvdec)
{
return nvkm_nvdec_new_(device, index, pnvdec);
}
#ifndef __NVKM_NVDEC_PRIV_H__
#define __NVKM_NVDEC_PRIV_H__
#include <engine/nvdec.h>
int nvkm_nvdec_new_(struct nvkm_device *, int, struct nvkm_nvdec **);
#endif
nvkm-y += nvkm/engine/sec2/base.o
nvkm-y += nvkm/engine/sec2/gp102.o
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/msgqueue.h>
#include <engine/falcon.h>
static void *
nvkm_sec2_dtor(struct nvkm_engine *engine)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
nvkm_msgqueue_del(&sec2->queue);
nvkm_falcon_del(&sec2->falcon);
return sec2;
}
static void
nvkm_sec2_intr(struct nvkm_engine *engine)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
struct nvkm_subdev *subdev = &engine->subdev;
struct nvkm_device *device = subdev->device;
u32 disp = nvkm_rd32(device, 0x8701c);
u32 intr = nvkm_rd32(device, 0x87008) & disp & ~(disp >> 16);
if (intr & 0x00000040) {
schedule_work(&sec2->work);
nvkm_wr32(device, 0x87004, 0x00000040);
intr &= ~0x00000040;
}
if (intr) {
nvkm_error(subdev, "unhandled intr %08x\n", intr);
nvkm_wr32(device, 0x87004, intr);
}
}
static void
nvkm_sec2_recv(struct work_struct *work)
{
struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work);
nvkm_msgqueue_recv(sec2->queue);
}
static int
nvkm_sec2_oneinit(struct nvkm_engine *engine)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
return nvkm_falcon_v1_new(&sec2->engine.subdev, "SEC2", 0x87000,
&sec2->falcon);
}
static int
nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
flush_work(&sec2->work);
return 0;
}
static const struct nvkm_engine_func
nvkm_sec2 = {
.dtor = nvkm_sec2_dtor,
.oneinit = nvkm_sec2_oneinit,
.fini = nvkm_sec2_fini,
.intr = nvkm_sec2_intr,
};
int
nvkm_sec2_new_(struct nvkm_device *device, int index,
struct nvkm_sec2 **psec2)
{
struct nvkm_sec2 *sec2;
if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
return -ENOMEM;
INIT_WORK(&sec2->work, nvkm_sec2_recv);
return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
int
gp102_sec2_new(struct nvkm_device *device, int index,
struct nvkm_sec2 **psec2)
{
return nvkm_sec2_new_(device, index, psec2);
}
#ifndef __NVKM_SEC2_PRIV_H__
#define __NVKM_SEC2_PRIV_H__
#include <engine/sec2.h>
#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
int nvkm_sec2_new_(struct nvkm_device *, int, struct nvkm_sec2 **);
#endif
 nvkm-y += nvkm/falcon/base.o
 nvkm-y += nvkm/falcon/v1.o
+nvkm-y += nvkm/falcon/msgqueue.o
+nvkm-y += nvkm/falcon/msgqueue_0137c63d.o
+nvkm-y += nvkm/falcon/msgqueue_0148cdec.o
@@ -41,14 +41,22 @@ void
 nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u8 port)
 {
+	mutex_lock(&falcon->dmem_mutex);
 	falcon->func->load_dmem(falcon, data, start, size, port);
+	mutex_unlock(&falcon->dmem_mutex);
 }
 void
 nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
		      void *data)
 {
+	mutex_lock(&falcon->dmem_mutex);
 	falcon->func->read_dmem(falcon, start, size, port, data);
+	mutex_unlock(&falcon->dmem_mutex);
 }
 void
@@ -129,6 +137,9 @@ nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
 void
 nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
 {
+	if (unlikely(!falcon))
+		return;
 	mutex_lock(&falcon->mutex);
 	if (falcon->user == user) {
 		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
@@ -159,6 +170,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
		 struct nvkm_subdev *subdev, const char *name, u32 addr,
		 struct nvkm_falcon *falcon)
 {
+	u32 debug_reg;
 	u32 reg;
 	falcon->func = func;
@@ -166,6 +178,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
 	falcon->name = name;
 	falcon->addr = addr;
 	mutex_init(&falcon->mutex);
+	mutex_init(&falcon->dmem_mutex);
 	reg = nvkm_falcon_rd32(falcon, 0x12c);
 	falcon->version = reg & 0xf;
@@ -177,8 +190,31 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
 	falcon->code.limit = (reg & 0x1ff) << 8;
 	falcon->data.limit = (reg & 0x3fe00) >> 1;
-	reg = nvkm_falcon_rd32(falcon, 0xc08);
-	falcon->debug = (reg >> 20) & 0x1;
+	switch (subdev->index) {
+	case NVKM_ENGINE_GR:
+		debug_reg = 0x0;
+		break;
+	case NVKM_SUBDEV_PMU:
+		debug_reg = 0xc08;
+		break;
+	case NVKM_ENGINE_NVDEC:
+		debug_reg = 0xd00;
+		break;
+	case NVKM_ENGINE_SEC2:
+		debug_reg = 0x408;
+		falcon->has_emem = true;
+		break;
+	default:
+		nvkm_warn(subdev, "unsupported falcon %s!\n",
+			  nvkm_subdev_name[subdev->index]);
+		debug_reg = 0;
+		break;
+	}
+	if (debug_reg) {
+		u32 val = nvkm_falcon_rd32(falcon, debug_reg);
+		falcon->debug = (val >> 20) & 0x1;
+	}
 }
 void
...
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "msgqueue.h"
#include <engine/falcon.h>
#include <subdev/secboot.h>
#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
#define QUEUE_ALIGNMENT 4
/* max size of the messages we can receive */
#define MSG_BUF_SIZE 128
static int
msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
struct nvkm_falcon *falcon = priv->falcon;
mutex_lock(&queue->mutex);
queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
return 0;
}
static void
msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
bool commit)
{
struct nvkm_falcon *falcon = priv->falcon;
if (commit)
nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);
mutex_unlock(&queue->mutex);
}
static bool
msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
struct nvkm_falcon *falcon = priv->falcon;
u32 head, tail;
head = nvkm_falcon_rd32(falcon, queue->head_reg);
tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
return head == tail;
}
static int
msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
void *data, u32 size)
{
struct nvkm_falcon *falcon = priv->falcon;
const struct nvkm_subdev *subdev = priv->falcon->owner;
u32 head, tail, available;
head = nvkm_falcon_rd32(falcon, queue->head_reg);
/* has the buffer looped? */
if (head < queue->position)
queue->position = queue->offset;
tail = queue->position;
available = head - tail;
if (available == 0) {
nvkm_warn(subdev, "no message data available\n");
return 0;
}
if (size > available) {
nvkm_warn(subdev, "message data smaller than read request\n");
size = available;
}
nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
queue->position += ALIGN(size, QUEUE_ALIGNMENT);
return size;
}
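A worked example of the wrap handling above, with assumed values: say queue->offset is 0x800 and a previous pop left queue->position at 0x9f0, near the end of the ring. If the firmware then wraps and writes a new message at the start of the buffer, head reads back as, say, 0x840; head < queue->position detects the wrap, queue->position is reset to 0x800, and available becomes 0x840 - 0x800 = 0x40 bytes.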
static int
msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
struct nvkm_msgqueue_hdr *hdr)
{
const struct nvkm_subdev *subdev = priv->falcon->owner;
int err;
err = msg_queue_open(priv, queue);
if (err) {
nvkm_error(subdev, "fail to open queue %d\n", queue->index);
return err;
}
if (msg_queue_empty(priv, queue)) {
err = 0;
goto close;
}
err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
if (err >= 0 && err != HDR_SIZE)
err = -EINVAL;
if (err < 0) {
nvkm_error(subdev, "failed to read message header: %d\n", err);
goto close;
}
if (hdr->size > MSG_BUF_SIZE) {
nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
err = -ENOSPC;
goto close;
}
if (hdr->size > HDR_SIZE) {
u32 read_size = hdr->size - HDR_SIZE;
err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
if (err >= 0 && err != read_size)
err = -EINVAL;
if (err < 0) {
nvkm_error(subdev, "failed to read message: %d\n", err);
goto close;
}
}
close:
msg_queue_close(priv, queue, (err >= 0));
return err;
}
static bool
cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
u32 size, bool *rewind)
{
struct nvkm_falcon *falcon = priv->falcon;
u32 head, tail, free;
size = ALIGN(size, QUEUE_ALIGNMENT);
head = nvkm_falcon_rd32(falcon, queue->head_reg);
tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
if (head >= tail) {
free = queue->offset + queue->size - head;
free -= HDR_SIZE;
if (size > free) {
*rewind = true;
head = queue->offset;
}
}
if (head < tail)
free = tail - head - 1;
return size <= free;
}
static int
cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
void *data, u32 size)
{
nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
queue->position += ALIGN(size, QUEUE_ALIGNMENT);
return 0;
}
/* REWIND unit is always 0x00 */
#define MSGQUEUE_UNIT_REWIND 0x00
static void
cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
const struct nvkm_subdev *subdev = priv->falcon->owner;
struct nvkm_msgqueue_hdr cmd;
int err;
cmd.unit_id = MSGQUEUE_UNIT_REWIND;
cmd.size = sizeof(cmd);
err = cmd_queue_push(priv, queue, &cmd, cmd.size);
if (err)
nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
else
nvkm_error(subdev, "queue %d rewinded\n", queue->index);
queue->position = queue->offset;
}
static int
cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
u32 size)
{
struct nvkm_falcon *falcon = priv->falcon;
const struct nvkm_subdev *subdev = priv->falcon->owner;
bool rewind = false;
mutex_lock(&queue->mutex);
if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
nvkm_error(subdev, "queue full\n");
mutex_unlock(&queue->mutex);
return -EAGAIN;
}
queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);
if (rewind)
cmd_queue_rewind(priv, queue);
return 0;
}
static void
cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
bool commit)
{
struct nvkm_falcon *falcon = priv->falcon;
if (commit)
nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);
mutex_unlock(&queue->mutex);
}
static int
cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
struct nvkm_msgqueue_queue *queue)
{
const struct nvkm_subdev *subdev = priv->falcon->owner;
static unsigned long timeout = ~0;
unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
int ret = -EAGAIN;
bool commit = true;
while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
ret = cmd_queue_open(priv, queue, cmd->size);
if (ret) {
nvkm_error(subdev, "pmu_queue_open_write failed\n");
return ret;
}
ret = cmd_queue_push(priv, queue, cmd, cmd->size);
if (ret) {
nvkm_error(subdev, "pmu_queue_push failed\n");
commit = false;
}
cmd_queue_close(priv, queue, commit);
return ret;
}
static struct nvkm_msgqueue_seq *
msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
{
const struct nvkm_subdev *subdev = priv->falcon->owner;
struct nvkm_msgqueue_seq *seq;
u32 index;
mutex_lock(&priv->seq_lock);
index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
nvkm_error(subdev, "no free sequence available\n");
mutex_unlock(&priv->seq_lock);
return ERR_PTR(-EAGAIN);
}
set_bit(index, priv->seq_tbl);
mutex_unlock(&priv->seq_lock);
seq = &priv->seq[index];
seq->state = SEQ_STATE_PENDING;
return seq;
}
static void
msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
{
/* no need to acquire seq_lock since clear_bit is atomic */
seq->state = SEQ_STATE_FREE;
seq->callback = NULL;
seq->completion = NULL;
clear_bit(seq->id, priv->seq_tbl);
}
/* specifies that we want to know the command status in the answer message */
#define CMD_FLAGS_STATUS BIT(0)
/* specifies that we want an interrupt when the answer message is queued */
#define CMD_FLAGS_INTR BIT(1)
int
nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
struct completion *completion, bool wait_init)
{
struct nvkm_msgqueue_seq *seq;
struct nvkm_msgqueue_queue *queue;
int ret;
if (wait_init && !wait_for_completion_timeout(&priv->init_done,
msecs_to_jiffies(1000)))
return -ETIMEDOUT;
queue = priv->func->cmd_queue(priv, prio);
if (IS_ERR(queue))
return PTR_ERR(queue);
seq = msgqueue_seq_acquire(priv);
if (IS_ERR(seq))
return PTR_ERR(seq);
cmd->seq_id = seq->id;
cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
seq->callback = cb;
seq->state = SEQ_STATE_USED;
seq->completion = completion;
ret = cmd_write(priv, cmd, queue);
if (ret) {
seq->state = SEQ_STATE_PENDING;
msgqueue_seq_release(priv, seq);
}
return ret;
}
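To illustrate the sequence bookkeeping above: a hedged sketch of a synchronous submission built on nvkm_msgqueue_post(). The wrapper and timeout are illustrative, and the priority constant is assumed from the msgqueue private header.

/* Illustrative sketch only -- not part of this commit. */
static int
example_post_sync(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;
	/* post with no callback; seq_id and ctrl_flags are filled in */
	ret = nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, cmd,
				 NULL, &done, true);
	if (ret)
		return ret;
	/* completed by msgqueue_msg_handle() when the answer arrives */
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;
	return 0;
}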
static int
msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
{
const struct nvkm_subdev *subdev = priv->falcon->owner;
struct nvkm_msgqueue_seq *seq;
seq = &priv->seq[hdr->seq_id];
if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
return -EINVAL;
}
if (seq->state == SEQ_STATE_USED) {
if (seq->callback)
seq->callback(priv, hdr);
}
if (seq->completion)
complete(seq->completion);
msgqueue_seq_release(priv, seq);
return 0;
}
static int
msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
struct nvkm_msgqueue_hdr *hdr)
{
struct nvkm_falcon *falcon = priv->falcon;
const struct nvkm_subdev *subdev = falcon->owner;
u32 tail;
u32 tail_reg;
int ret;
/*
* Of course the message queue registers vary depending on the falcon
* used...
*/
switch (falcon->owner->index) {
case NVKM_SUBDEV_PMU:
tail_reg = 0x4cc;
break;
case NVKM_ENGINE_SEC2:
tail_reg = 0xa34;
break;
default:
nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
nvkm_subdev_name[falcon->owner->index]);
return -EINVAL;
}
/*
* Read the message - queues are not initialized yet so we cannot rely
* on msg_queue_read()
*/
tail = nvkm_falcon_rd32(falcon, tail_reg);
nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);
if (hdr->size > MSG_BUF_SIZE) {
nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
return -ENOSPC;
}
nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
(hdr + 1));
tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
nvkm_falcon_wr32(falcon, tail_reg, tail);
ret = priv->func->init_func->init_callback(priv, hdr);
if (ret)
return ret;
return 0;
}
void
nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
struct nvkm_msgqueue_queue *queue)
{
/*
* We are invoked from a worker thread, so normally we have plenty of
* stack space to work with.
*/
u8 msg_buffer[MSG_BUF_SIZE];
struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
int ret;
/* the first message we receive must be the init message */
if (!priv->init_msg_received) {
ret = msgqueue_handle_init_msg(priv, hdr);
if (!ret)
priv->init_msg_received = true;
} else {
while (msg_queue_read(priv, queue, hdr) > 0)
msgqueue_msg_handle(priv, hdr);
}
}
void
nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
{
if (!queue || !queue->func || !queue->func->init_func)
return;
queue->func->init_func->gen_cmdline(queue, buf);
}
int
nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *queue, enum nvkm_secboot_falcon falcon)
{
if (!queue || !queue->func->acr_func || !queue->func->acr_func->boot_falcon)
return -ENODEV;
return queue->func->acr_func->boot_falcon(queue, falcon);
}
int
nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
{
const struct nvkm_subdev *subdev = falcon->owner;
int ret = -EINVAL;
switch (version) {
case 0x0137c63d:
ret = msgqueue_0137c63d_new(falcon, queue);
break;
case 0x0148cdec:
ret = msgqueue_0148cdec_new(falcon, queue);
break;
default:
nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
version);
break;
}
if (ret == 0) {
nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
(*queue)->fw_version = version;
}
return ret;
}
void
nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
{
if (*queue) {
(*queue)->func->dtor(*queue);
*queue = NULL;
}
}
void
nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
{
/* don't dereference queue->falcon before queue itself is known valid */
if (!queue)
return;
if (!queue->func || !queue->func->recv) {
const struct nvkm_subdev *subdev = queue->falcon->owner;
nvkm_warn(subdev,
"cmdqueue recv function called while no firmware set!\n");
return;
}
queue->func->recv(queue);
}
int
nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
{
/* firmware not set yet... */
if (!queue)
return 0;
queue->init_msg_received = false;
reinit_completion(&queue->init_done);
return 0;
}
void
nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
struct nvkm_falcon *falcon,
struct nvkm_msgqueue *queue)
{
int i;
queue->func = func;
queue->falcon = falcon;
mutex_init(&queue->seq_lock);
for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
queue->seq[i].id = i;
init_completion(&queue->init_done);
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __NVKM_CORE_FALCON_MSGQUEUE_H
#define __NVKM_CORE_FALCON_MSGQUEUE_H
#include <core/msgqueue.h>
/*
 * The struct nvkm_msgqueue (named so for lack of a better candidate) manages
 * a firmware (typically, NVIDIA signed firmware) running under a given falcon.
 *
 * Such firmwares expect to receive commands (through one or several command
 * queues) and will reply to each command by sending a message (using one
 * message queue).
 *
 * Each firmware can support one or several units - ACR for managing secure
 * falcons, PMU for power management, etc. A unit can be seen as a class to
 * which commands can be sent.
 *
 * One usage example would be to send a command to the SEC falcon to ask it to
 * reset a secure falcon. The SEC falcon will receive the command, process it,
 * and send a message to signal success or failure. Only when the corresponding
 * message is received can the requester assume the request has been processed.
 *
 * Since we expect many variations between the firmwares NVIDIA will release
 * across GPU generations, this library is built in a very modular way. Message
 * formats and queue details (such as the number of queues in use) are left to
 * specializations of struct nvkm_msgqueue, while the functions in msgqueue.c
 * take care of posting commands and processing messages in a fashion that is
 * universal.
 */
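To make the command/message round-trip concrete, below is a minimal sketch of how a unit command is typically posted and matched to its reply. The unit ID, command type, payload and function names are hypothetical, for illustration only; the genuine definitions live in the firmware-specific files (see the ACR commands in msgqueue0137c63d.c):

/* Hypothetical command for some firmware unit; illustration only. */
struct example_cmd {
        struct nvkm_msgqueue_hdr hdr;
        u8 cmd_type;
        u32 payload;
};

static void
example_callback(struct nvkm_msgqueue *queue, struct nvkm_msgqueue_hdr *hdr)
{
        /* called with the reply message whose seq_id matches our command */
}

static int
example_send(struct nvkm_msgqueue *queue)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct example_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.hdr.unit_id = 0x42;         /* hypothetical unit ID */
        cmd.hdr.size = sizeof(cmd);
        cmd.cmd_type = 0x00;            /* hypothetical command type */

        /* seq_id and ctrl_flags are filled in by nvkm_msgqueue_post() */
        nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
                           example_callback, &done, true);
        if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
                return -ETIMEDOUT;
        return 0;
}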
enum msgqueue_msg_priority {
MSGQUEUE_MSG_PRIORITY_HIGH,
MSGQUEUE_MSG_PRIORITY_LOW,
};
/**
* struct nvkm_msgqueue_hdr - header for all commands/messages
* @unit_id: id of the firmware unit receiving the command/sending the message
* @size: total size of command/message
* @ctrl_flags: type of command/message
* @seq_id: used to match a message from its corresponding command
*/
struct nvkm_msgqueue_hdr {
u8 unit_id;
u8 size;
u8 ctrl_flags;
u8 seq_id;
};
/**
* struct nvkm_msgqueue_msg - base message.
*
* This is just a header and a message (or command) type. Useful when
* building command-specific structures.
*/
struct nvkm_msgqueue_msg {
struct nvkm_msgqueue_hdr hdr;
u8 msg_type;
};
struct nvkm_msgqueue;
typedef void
(*nvkm_msgqueue_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
/**
* struct nvkm_msgqueue_init_func - msgqueue functions related to initialization
*
* @gen_cmdline: build the commandline into a pre-allocated buffer
* @init_callback: called to process the init message
*/
struct nvkm_msgqueue_init_func {
void (*gen_cmdline)(struct nvkm_msgqueue *, void *);
int (*init_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
};
/**
* struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR
*
* @boot_falcon: build and send the command to reset a given falcon
*/
struct nvkm_msgqueue_acr_func {
int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon);
};
struct nvkm_msgqueue_func {
const struct nvkm_msgqueue_init_func *init_func;
const struct nvkm_msgqueue_acr_func *acr_func;
void (*dtor)(struct nvkm_msgqueue *);
struct nvkm_msgqueue_queue *(*cmd_queue)(struct nvkm_msgqueue *,
enum msgqueue_msg_priority);
void (*recv)(struct nvkm_msgqueue *queue);
};
/**
* struct nvkm_msgqueue_queue - information about a command or message queue
*
* The number of queues is firmware-dependent. All queues must have their
* information filled by the init message handler.
*
* @mutex: to be acquired when the queue is being used
* @index: physical queue index
* @offset: DMEM offset where this queue begins
* @size: size allocated to this queue in DMEM (in bytes)
* @position: current write position
* @head_reg: address of the HEAD register for this queue
* @tail_reg: address of the TAIL register for this queue
*/
struct nvkm_msgqueue_queue {
struct mutex mutex;
u32 index;
u32 offset;
u32 size;
u32 position;
u32 head_reg;
u32 tail_reg;
};
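For intuition, the fields above describe a simple ring in the falcon's DMEM. A sketch of how free space could be derived from the head/tail registers follows; this helper is illustrative only (it assumes head/tail hold DMEM offsets inside [offset, offset + size) and that writes rewind at the end), the driver's real accounting lives in msgqueue.c:

static u32
example_queue_free(struct nvkm_falcon *falcon,
                   struct nvkm_msgqueue_queue *queue)
{
        u32 head = nvkm_falcon_rd32(falcon, queue->head_reg);
        u32 tail = nvkm_falcon_rd32(falcon, queue->tail_reg);

        if (head >= tail)       /* writer ahead of reader: free to the end */
                return (queue->offset + queue->size) - head;
        return tail - head;     /* writer wrapped: free up to the reader */
}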
/**
* struct nvkm_msgqueue_seq - keep track of ongoing commands
*
* Every time a command is sent, a sequence is assigned to it so the
* corresponding message can be matched. Upon receiving the message, a callback
* can be called and/or a completion signaled.
*
* @id: sequence ID
* @state: current state
* @callback: callback to call upon receiving matching message
* @completion: completion to signal after callback is called
*/
struct nvkm_msgqueue_seq {
u16 id;
enum {
SEQ_STATE_FREE = 0,
SEQ_STATE_PENDING,
SEQ_STATE_USED,
SEQ_STATE_CANCELLED
} state;
nvkm_msgqueue_callback callback;
struct completion *completion;
};
/*
* We can have an arbitrary number of sequences, but realistically we will
* probably not use that many simultaneously.
*/
#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
/**
* struct nvkm_msgqueue - manage a command/message based FW on a falcon
*
* @falcon: falcon to be managed
* @func: implementation of the firmware to use
* @init_msg_received: whether the init message has already been received
* @init_done: whether all init is complete and commands can be processed
* @seq_lock: protects seq and seq_tbl
* @seq: sequences to match commands and messages
* @seq_tbl: bitmap of sequences currently in use
*/
struct nvkm_msgqueue {
struct nvkm_falcon *falcon;
const struct nvkm_msgqueue_func *func;
u32 fw_version;
bool init_msg_received;
struct completion init_done;
struct mutex seq_lock;
struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
};
void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *,
struct nvkm_msgqueue *);
int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority,
struct nvkm_msgqueue_hdr *, nvkm_msgqueue_callback,
struct completion *, bool);
void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *,
struct nvkm_msgqueue_queue *);
int msgqueue_0137c63d_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
int msgqueue_0148cdec_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
#endif
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "msgqueue.h"
#include <engine/falcon.h>
#include <subdev/secboot.h>
/* Queues identifiers */
enum {
/* High Priority Command Queue for Host -> PMU communication */
MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
/* Low Priority Command Queue for Host -> PMU communication */
MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
/* Message queue for PMU -> Host communication */
MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
MSGQUEUE_0137C63D_NUM_QUEUES = 5,
};
struct msgqueue_0137c63d {
struct nvkm_msgqueue base;
struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
};
#define msgqueue_0137c63d(q) \
container_of(q, struct msgqueue_0137c63d, base)
static struct nvkm_msgqueue_queue *
msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
enum msgqueue_msg_priority priority)
{
struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
const struct nvkm_subdev *subdev = priv->base.falcon->owner;
switch (priority) {
case MSGQUEUE_MSG_PRIORITY_HIGH:
return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
case MSGQUEUE_MSG_PRIORITY_LOW:
return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
default:
nvkm_error(subdev, "invalid command queue!\n");
return ERR_PTR(-EINVAL);
}
}
static void
msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
{
struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
struct nvkm_msgqueue_queue *q_queue =
&priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];
nvkm_msgqueue_process_msgs(&priv->base, q_queue);
}
/* Init unit */
#define MSGQUEUE_0137C63D_UNIT_INIT 0x07
enum {
INIT_MSG_INIT = 0x0,
};
static void
init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
{
struct {
u32 reserved;
u32 freq_hz;
u32 trace_size;
u32 trace_dma_base;
u16 trace_dma_base1;
u8 trace_dma_offset;
u32 trace_dma_idx;
bool secure_mode;
bool raise_priv_sec;
struct {
u32 dma_base;
u16 dma_base1;
u8 dma_offset;
u16 fb_size;
u8 dma_idx;
} gc6_ctx;
u8 pad;
} *args = buf;
args->secure_mode = true;
}
/* forward declaration */
static int acr_init_wpr(struct nvkm_msgqueue *queue);
static int
init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
{
struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue);
struct {
struct nvkm_msgqueue_msg base;
u8 pad;
u16 os_debug_entry_point;
struct {
u16 size;
u16 offset;
u8 index;
u8 pad;
} queue_info[MSGQUEUE_0137C63D_NUM_QUEUES];
u16 sw_managed_area_offset;
u16 sw_managed_area_size;
} *init = (void *)hdr;
const struct nvkm_subdev *subdev = _queue->falcon->owner;
int i;
if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) {
nvkm_error(subdev, "expected message from init unit\n");
return -EINVAL;
}
if (init->base.msg_type != INIT_MSG_INIT) {
nvkm_error(subdev, "expected PMU init msg\n");
return -EINVAL;
}
for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) {
struct nvkm_msgqueue_queue *queue = &priv->queue[i];
mutex_init(&queue->mutex);
queue->index = init->queue_info[i].index;
queue->offset = init->queue_info[i].offset;
queue->size = init->queue_info[i].size;
if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) {
queue->head_reg = 0x4a0 + (queue->index * 4);
queue->tail_reg = 0x4b0 + (queue->index * 4);
} else {
queue->head_reg = 0x4c8;
queue->tail_reg = 0x4cc;
}
nvkm_debug(subdev,
"queue %d: index %d, offset 0x%08x, size 0x%08x\n",
i, queue->index, queue->offset, queue->size);
}
/* Complete initialization by initializing WPR region */
return acr_init_wpr(&priv->base);
}
static const struct nvkm_msgqueue_init_func
msgqueue_0137c63d_init_func = {
.gen_cmdline = init_gen_cmdline,
.init_callback = init_callback,
};
/* ACR unit */
#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a
enum {
ACR_CMD_INIT_WPR_REGION = 0x00,
ACR_CMD_BOOTSTRAP_FALCON = 0x01,
};
static void
acr_init_wpr_callback(struct nvkm_msgqueue *queue,
struct nvkm_msgqueue_hdr *hdr)
{
struct {
struct nvkm_msgqueue_msg base;
u32 error_code;
} *msg = (void *)hdr;
const struct nvkm_subdev *subdev = queue->falcon->owner;
if (msg->error_code) {
nvkm_error(subdev, "ACR WPR init failure: %d\n",
msg->error_code);
return;
}
nvkm_debug(subdev, "ACR WPR init complete\n");
complete_all(&queue->init_done);
}
static int
acr_init_wpr(struct nvkm_msgqueue *queue)
{
/*
* region_id: region ID in WPR region
* wpr_offset: offset in WPR region
*/
struct {
struct nvkm_msgqueue_hdr hdr;
u8 cmd_type;
u32 region_id;
u32 wpr_offset;
} cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
cmd.hdr.size = sizeof(cmd);
cmd.cmd_type = ACR_CMD_INIT_WPR_REGION;
cmd.region_id = 0x01;
cmd.wpr_offset = 0x00;
nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
acr_init_wpr_callback, NULL, false);
return 0;
}
static void
acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
struct nvkm_msgqueue_hdr *hdr)
{
struct acr_bootstrap_falcon_msg {
struct nvkm_msgqueue_msg base;
u32 falcon_id;
} *msg = (void *)hdr;
const struct nvkm_subdev *subdev = priv->falcon->owner;
u32 falcon_id = msg->falcon_id;
if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
nvkm_error(subdev, "in bootstrap falcon callback:\n");
nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
return;
}
nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
}
enum {
ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
};
static int
acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
{
DECLARE_COMPLETION_ONSTACK(completed);
/*
* flags - Flag specifying RESET or no RESET.
* falcon id - Falcon id specifying falcon to bootstrap.
*/
struct {
struct nvkm_msgqueue_hdr hdr;
u8 cmd_type;
u32 flags;
u32 falcon_id;
} cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
cmd.hdr.size = sizeof(cmd);
cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
cmd.falcon_id = falcon;
nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
acr_boot_falcon_callback, &completed, true);
if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
return -ETIMEDOUT;
return 0;
}
static const struct nvkm_msgqueue_acr_func
msgqueue_0137c63d_acr_func = {
.boot_falcon = acr_boot_falcon,
};
static void
msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
{
kfree(msgqueue_0137c63d(queue));
}
static const struct nvkm_msgqueue_func
msgqueue_0137c63d_func = {
.init_func = &msgqueue_0137c63d_init_func,
.acr_func = &msgqueue_0137c63d_acr_func,
.cmd_queue = msgqueue_0137c63d_cmd_queue,
.recv = msgqueue_0137c63d_process_msgs,
.dtor = msgqueue_0137c63d_dtor,
};
int
msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
{
struct msgqueue_0137c63d *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return -ENOMEM;
*queue = &ret->base;
nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base);
return 0;
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "msgqueue.h"
#include <engine/falcon.h>
#include <subdev/secboot.h>
/*
* This firmware runs on the SEC falcon. It only has one command and one
* message queue, and uses a different command line and init message.
*/
enum {
MSGQUEUE_0148CDEC_COMMAND_QUEUE = 0,
MSGQUEUE_0148CDEC_MESSAGE_QUEUE = 1,
MSGQUEUE_0148CDEC_NUM_QUEUES,
};
struct msgqueue_0148cdec {
struct nvkm_msgqueue base;
struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES];
};
#define msgqueue_0148cdec(q) \
container_of(q, struct msgqueue_0148cdec, base)
static struct nvkm_msgqueue_queue *
msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue,
enum msgqueue_msg_priority priority)
{
struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE];
}
static void
msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue)
{
struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
struct nvkm_msgqueue_queue *q_queue =
&priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE];
nvkm_msgqueue_process_msgs(&priv->base, q_queue);
}
/* Init unit */
#define MSGQUEUE_0148CDEC_UNIT_INIT 0x01
enum {
INIT_MSG_INIT = 0x0,
};
static void
init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
{
struct {
u32 freq_hz;
u32 falc_trace_size;
u32 falc_trace_dma_base;
u32 falc_trace_dma_idx;
bool secure_mode;
} *args = buf;
args->secure_mode = false;
}
static int
init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
{
struct msgqueue_0148cdec *priv = msgqueue_0148cdec(_queue);
struct {
struct nvkm_msgqueue_msg base;
u8 num_queues;
u16 os_debug_entry_point;
struct {
u32 offset;
u16 size;
u8 index;
u8 id;
} queue_info[MSGQUEUE_0148CDEC_NUM_QUEUES];
u16 sw_managed_area_offset;
u16 sw_managed_area_size;
} *init = (void *)hdr;
const struct nvkm_subdev *subdev = _queue->falcon->owner;
int i;
if (init->base.hdr.unit_id != MSGQUEUE_0148CDEC_UNIT_INIT) {
nvkm_error(subdev, "expected message from init unit\n");
return -EINVAL;
}
if (init->base.msg_type != INIT_MSG_INIT) {
nvkm_error(subdev, "expected SEC init msg\n");
return -EINVAL;
}
for (i = 0; i < MSGQUEUE_0148CDEC_NUM_QUEUES; i++) {
u8 id = init->queue_info[i].id;
struct nvkm_msgqueue_queue *queue = &priv->queue[id];
mutex_init(&queue->mutex);
queue->index = init->queue_info[i].index;
queue->offset = init->queue_info[i].offset;
queue->size = init->queue_info[i].size;
if (id == MSGQUEUE_0148CDEC_MESSAGE_QUEUE) {
queue->head_reg = 0xa30 + (queue->index * 8);
queue->tail_reg = 0xa34 + (queue->index * 8);
} else {
queue->head_reg = 0xa00 + (queue->index * 8);
queue->tail_reg = 0xa04 + (queue->index * 8);
}
nvkm_debug(subdev,
"queue %d: index %d, offset 0x%08x, size 0x%08x\n",
id, queue->index, queue->offset, queue->size);
}
complete_all(&_queue->init_done);
return 0;
}
static const struct nvkm_msgqueue_init_func
msgqueue_0148cdec_init_func = {
.gen_cmdline = init_gen_cmdline,
.init_callback = init_callback,
};
/* ACR unit */
#define MSGQUEUE_0148CDEC_UNIT_ACR 0x08
enum {
ACR_CMD_BOOTSTRAP_FALCON = 0x00,
};
static void
acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
struct nvkm_msgqueue_hdr *hdr)
{
struct acr_bootstrap_falcon_msg {
struct nvkm_msgqueue_msg base;
u32 error_code;
u32 falcon_id;
} *msg = (void *)hdr;
const struct nvkm_subdev *subdev = priv->falcon->owner;
u32 falcon_id = msg->falcon_id;
if (msg->error_code) {
nvkm_error(subdev, "in bootstrap falcon callback:\n");
nvkm_error(subdev, "firmware reported error code 0x%x\n",
msg->error_code);
return;
}
if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
nvkm_error(subdev, "in bootstrap falcon callback:\n");
nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
return;
}
nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
}
enum {
ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
};
static int
acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
{
DECLARE_COMPLETION_ONSTACK(completed);
/*
* flags - Flag specifying RESET or no RESET.
* falcon id - Falcon id specifying falcon to bootstrap.
*/
struct {
struct nvkm_msgqueue_hdr hdr;
u8 cmd_type;
u32 flags;
u32 falcon_id;
} cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unit_id = MSGQUEUE_0148CDEC_UNIT_ACR;
cmd.hdr.size = sizeof(cmd);
cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
cmd.falcon_id = falcon;
nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
acr_boot_falcon_callback, &completed, true);
if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
return -ETIMEDOUT;
return 0;
}
const struct nvkm_msgqueue_acr_func
msgqueue_0148cdec_acr_func = {
.boot_falcon = acr_boot_falcon,
};
static void
msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue)
{
kfree(msgqueue_0148cdec(queue));
}
const struct nvkm_msgqueue_func
msgqueue_0148cdec_func = {
.init_func = &msgqueue_0148cdec_init_func,
.acr_func = &msgqueue_0148cdec_acr_func,
.cmd_queue = msgqueue_0148cdec_cmd_queue,
.recv = msgqueue_0148cdec_process_msgs,
.dtor = msgqueue_0148cdec_dtor,
};
int
msgqueue_0148cdec_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
{
struct msgqueue_0148cdec *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return -ENOMEM;
*queue = &ret->base;
nvkm_msgqueue_ctor(&msgqueue_0148cdec_func, falcon, &ret->base);
return 0;
}
@@ -40,8 +40,8 @@ nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
 	for (i = 0; i < size / 4; i++) {
 		/* write new tag every 256B */
 		if ((i & 0x3f) == 0)
-			nvkm_falcon_wr32(falcon, 0x188, tag++);
-		nvkm_falcon_wr32(falcon, 0x184, ((u32 *)data)[i]);
+			nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
+		nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
 	}

 	/*
@@ -53,16 +53,44 @@ nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
 		/* write new tag every 256B */
 		if ((i & 0x3f) == 0)
-			nvkm_falcon_wr32(falcon, 0x188, tag++);
-		nvkm_falcon_wr32(falcon, 0x184, extra & (BIT(rem * 8) - 1));
+			nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
+		nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
+				 extra & (BIT(rem * 8) - 1));
 		++i;
 	}

 	/* code must be padded to 0x40 words */
 	for (; i & 0x3f; i++)
-		nvkm_falcon_wr32(falcon, 0x184, 0);
+		nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
 }

+static void
+nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
+			 u32 size, u8 port)
+{
+	u8 rem = size % 4;
+	int i;
+
+	size -= rem;
+
+	nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
+	for (i = 0; i < size / 4; i++)
+		nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);
+
+	/*
+	 * If size is not a multiple of 4, mask the last word to ensure garbage
+	 * does not get written
+	 */
+	if (rem) {
+		u32 extra = ((u32 *)data)[i];
+
+		nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
+				 extra & (BIT(rem * 8) - 1));
+	}
+}
+
+static const u32 EMEM_START_ADDR = 0x1000000;
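A note on the masking in the loaders above: when size is not a multiple of four, the final word is masked with BIT(rem * 8) - 1 so that only the rem valid low bytes reach the hardware. For example, rem = 3 gives BIT(24) - 1 = 0x00ffffff, which keeps the three meaningful bytes and zeroes the garbage in the top byte.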
@@ -70,20 +98,53 @@ nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
 static void
 nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
 			 u32 size, u8 port)
 {
 	u8 rem = size % 4;
 	int i;

+	if (start >= EMEM_START_ADDR && falcon->has_emem)
+		return nvkm_falcon_v1_load_emem(falcon, data,
+						start - EMEM_START_ADDR, size,
+						port);
+
 	size -= rem;

-	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 24));
+	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
 	for (i = 0; i < size / 4; i++)
-		nvkm_falcon_wr32(falcon, 0x1c4, ((u32 *)data)[i]);
+		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);

 	/*
-	 * If size is not a multiple of 4, mask the last work to ensure garbage
-	 * does not get read
+	 * If size is not a multiple of 4, mask the last word to ensure garbage
+	 * does not get written
 	 */
 	if (rem) {
 		u32 extra = ((u32 *)data)[i];

-		nvkm_falcon_wr32(falcon, 0x1c4, extra & (BIT(rem * 8) - 1));
+		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
+				 extra & (BIT(rem * 8) - 1));
 	}
 }
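The redirect added above relies on the aperture convention captured by EMEM_START_ADDR: data offsets at or above 0x1000000 address EMEM rather than DMEM on falcons that have it (falcon->has_emem; the SEC2 falcon introduced elsewhere in this series is the user), and EMEM is reached through its own 0xac0/0xac4 port registers instead of the 0x1c0/0x1c4 DMEM ports.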
+static void
+nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
+			 u8 port, void *data)
+{
+	u8 rem = size % 4;
+	int i;
+
+	size -= rem;
+
+	nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
+	for (i = 0; i < size / 4; i++)
+		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));
+
+	/*
+	 * If size is not a multiple of 4, mask the last word to ensure garbage
+	 * does not get read
+	 */
+	if (rem) {
+		u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));
+
+		for (i = size; i < size + rem; i++) {
+			((u8 *)data)[i] = (u8)(extra & 0xff);
+			extra >>= 8;
+		}
+	}
+}
+
@@ -94,18 +155,22 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
 	u8 rem = size % 4;
 	int i;

+	if (start >= EMEM_START_ADDR && falcon->has_emem)
+		return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR,
+						size, port, data);
+
 	size -= rem;

-	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 25));
+	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
 	for (i = 0; i < size / 4; i++)
-		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4);
+		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

 	/*
-	 * If size is not a multiple of 4, mask the last work to ensure garbage
+	 * If size is not a multiple of 4, mask the last word to ensure garbage
 	 * does not get read
 	 */
 	if (rem) {
-		u32 extra = nvkm_falcon_rd32(falcon, 0x1c4);
+		u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

 		for (i = size; i < size + rem; i++) {
 			((u8 *)data)[i] = (u8)(extra & 0xff);
@@ -118,6 +183,7 @@ static void
 nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
 {
 	u32 inst_loc;
+	u32 fbif;

 	/* disable instance block binding */
 	if (ctx == NULL) {
@@ -125,19 +191,34 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
 		return;
 	}

+	switch (falcon->owner->index) {
+	case NVKM_ENGINE_NVENC0:
+	case NVKM_ENGINE_NVENC1:
+	case NVKM_ENGINE_NVENC2:
+		fbif = 0x800;
+		break;
+	case NVKM_SUBDEV_PMU:
+		fbif = 0xe00;
+		break;
+	default:
+		fbif = 0x600;
+		break;
+	}
+
 	nvkm_falcon_wr32(falcon, 0x10c, 0x1);

 	/* setup apertures - virtual */
-	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_UCODE, 0x4);
-	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_VIRT, 0x0);
+	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
+	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);

 	/* setup apertures - physical */
-	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
-	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
-	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
+	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
+	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
+	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);

 	/* Set context */
 	switch (nvkm_memory_target(ctx->memory)) {
 	case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
+	case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
 	case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
 	default:
 		WARN_ON(1);
@@ -146,9 +227,12 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
 	/* Enable context */
 	nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
-	nvkm_falcon_wr32(falcon, 0x480,
+	nvkm_falcon_wr32(falcon, 0x054,
 			 ((ctx->addr >> 12) & 0xfffffff) |
 			 (inst_loc << 28) | (1 << 30));
+
+	nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
+	nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
 }

 static void
...
@@ -20,6 +20,7 @@ nvkm-y += nvkm/subdev/fb/gt215.o
 nvkm-y += nvkm/subdev/fb/mcp77.o
 nvkm-y += nvkm/subdev/fb/mcp89.o
 nvkm-y += nvkm/subdev/fb/gf100.o
+nvkm-y += nvkm/subdev/fb/gf108.o
 nvkm-y += nvkm/subdev/fb/gk104.o
 nvkm-y += nvkm/subdev/fb/gk20a.o
 nvkm-y += nvkm/subdev/fb/gm107.o
@@ -42,8 +43,10 @@ nvkm-y += nvkm/subdev/fb/ramnv50.o
 nvkm-y += nvkm/subdev/fb/ramgt215.o
 nvkm-y += nvkm/subdev/fb/rammcp77.o
 nvkm-y += nvkm/subdev/fb/ramgf100.o
+nvkm-y += nvkm/subdev/fb/ramgf108.o
 nvkm-y += nvkm/subdev/fb/ramgk104.o
 nvkm-y += nvkm/subdev/fb/ramgm107.o
+nvkm-y += nvkm/subdev/fb/ramgm200.o
 nvkm-y += nvkm/subdev/fb/ramgp100.o
 nvkm-y += nvkm/subdev/fb/sddr2.o
 nvkm-y += nvkm/subdev/fb/sddr3.o
...
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "gf100.h"
#include "ram.h"
static const struct nvkm_fb_func
gf108_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf108_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
};
int
gf108_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gf108_fb, device, index, pfb);
}
@@ -68,7 +68,7 @@ gm200_fb = {
 	.init = gm200_fb_init,
 	.init_page = gm200_fb_init_page,
 	.intr = gf100_fb_intr,
-	.ram_new = gm107_ram_new,
+	.ram_new = gm200_ram_new,
 	.memtype_valid = gf100_fb_memtype_valid,
 };
...
@@ -19,13 +19,38 @@ int nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
 void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
 void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);

+int gf100_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   struct nvkm_ram **);
 int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
-		   u32, struct nvkm_ram *);
+		   struct nvkm_ram *);
+u32 gf100_ram_probe_fbp(const struct nvkm_ram_func *,
+			struct nvkm_device *, int, int *);
+u32 gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+			       struct nvkm_device *, int, int *);
+u32 gf100_ram_probe_fbpa_amount(struct nvkm_device *, int);
 int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
 void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+int gf100_ram_init(struct nvkm_ram *);
+int gf100_ram_calc(struct nvkm_ram *, u32);
+int gf100_ram_prog(struct nvkm_ram *);
+void gf100_ram_tidy(struct nvkm_ram *);
+
+u32 gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+			       struct nvkm_device *, int, int *);
+
+int gk104_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   struct nvkm_ram **);
+void *gk104_ram_dtor(struct nvkm_ram *);
+int gk104_ram_init(struct nvkm_ram *);
+int gk104_ram_calc(struct nvkm_ram *, u32);
+int gk104_ram_prog(struct nvkm_ram *);
+void gk104_ram_tidy(struct nvkm_ram *);
+
+u32 gm107_ram_probe_fbp(const struct nvkm_ram_func *,
+			struct nvkm_device *, int, int *);

-int gk104_ram_ctor(struct nvkm_fb *, struct nvkm_ram **, u32);
-int gk104_ram_init(struct nvkm_ram *ram);
+u32 gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+			       struct nvkm_device *, int, int *);

 /* RAM type-specific MR calculation routines */
 int nvkm_sddr2_calc(struct nvkm_ram *);
@@ -46,7 +71,9 @@ int nv50_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gt215_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gf108_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 #endif
@@ -124,7 +124,7 @@ gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
 	}
 }

-static int
+int
 gf100_ram_calc(struct nvkm_ram *base, u32 freq)
 {
 	struct gf100_ram *ram = gf100_ram(base);
@@ -404,7 +404,7 @@ gf100_ram_calc(struct nvkm_ram *base, u32 freq)
 	return 0;
 }

-static int
+int
 gf100_ram_prog(struct nvkm_ram *base)
 {
 	struct gf100_ram *ram = gf100_ram(base);
@@ -413,7 +413,7 @@ gf100_ram_prog(struct nvkm_ram *base)
 	return 0;
 }

-static void
+void
 gf100_ram_tidy(struct nvkm_ram *base)
 {
 	struct gf100_ram *ram = gf100_ram(base);
@@ -500,7 +500,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	return 0;
 }

-static int
+int
 gf100_ram_init(struct nvkm_ram *base)
 {
 	static const u8 train0[] = {
@@ -543,77 +543,96 @@ gf100_ram_init(struct nvkm_ram *base)
 	return 0;
 }

-static const struct nvkm_ram_func
-gf100_ram_func = {
-	.init = gf100_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
-	.calc = gf100_ram_calc,
-	.prog = gf100_ram_prog,
-	.tidy = gf100_ram_tidy,
-};
+u32
+gf100_ram_probe_fbpa_amount(struct nvkm_device *device, int fbpa)
+{
+	return nvkm_rd32(device, 0x11020c + (fbpa * 0x1000));
+}
+
+u32
+gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
+			   struct nvkm_device *device, int fbp, int *pltcs)
+{
+	if (!(fbpao & BIT(fbp))) {
+		*pltcs = 1;
+		return func->probe_fbpa_amount(device, fbp);
+	}
+	return 0;
+}
+
+u32
+gf100_ram_probe_fbp(const struct nvkm_ram_func *func,
+		    struct nvkm_device *device, int fbp, int *pltcs)
+{
+	u32 fbpao = nvkm_rd32(device, 0x022554);
+	return func->probe_fbp_amount(func, fbpao, device, fbp, pltcs);
+}

 int
 gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
-	       u32 maskaddr, struct nvkm_ram *ram)
+	       struct nvkm_ram *ram)
 {
 	struct nvkm_subdev *subdev = &fb->subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_bios *bios = device->bios;
 	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
 	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
-	u32 parts = nvkm_rd32(device, 0x022438);
-	u32 pmask = nvkm_rd32(device, maskaddr);
-	u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
-	u64 psize, size = 0;
 	enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
-	bool uniform = true;
-	int ret, i;
+	u32 fbps = nvkm_rd32(device, 0x022438);
+	u64 total = 0, lcomm = ~0, lower, ubase, usize;
+	int ret, fbp, ltcs, ltcn = 0;

-	nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
-	nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);
-
-	/* read amount of vram attached to each memory controller */
-	for (i = 0; i < parts; i++) {
-		if (pmask & (1 << i))
-			continue;
-
-		psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
-		if (psize != bsize) {
-			if (psize < bsize)
-				bsize = psize;
-			uniform = false;
+	nvkm_debug(subdev, "%d FBP(s)\n", fbps);
+	for (fbp = 0; fbp < fbps; fbp++) {
+		u32 size = func->probe_fbp(func, device, fbp, &ltcs);
+		if (size) {
+			nvkm_debug(subdev, "FBP %d: %4d MiB, %d LTC(s)\n",
+				   fbp, size, ltcs);
+			lcomm  = min(lcomm, (u64)(size / ltcs) << 20);
+			total += size << 20;
+			ltcn  += ltcs;
+		} else {
+			nvkm_debug(subdev, "FBP %d: disabled\n", fbp);
 		}
-
-		nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
-		size += psize;
 	}

-	ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
+	lower = lcomm * ltcn;
+	ubase = lcomm + func->upper;
+	usize = total - lower;
+
+	nvkm_debug(subdev, "Lower: %4lld MiB @ %010llx\n", lower >> 20, 0ULL);
+	nvkm_debug(subdev, "Upper: %4lld MiB @ %010llx\n", usize >> 20, ubase);
+	nvkm_debug(subdev, "Total: %4lld MiB\n", total >> 20);
+
+	ret = nvkm_ram_ctor(func, fb, type, total, 0, ram);
 	if (ret)
 		return ret;

 	nvkm_mm_fini(&ram->vram);

-	/* if all controllers have the same amount attached, there's no holes */
-	if (uniform) {
+	/* Some GPUs are in what's known as a "mixed memory" configuration.
+	 *
+	 * This is either where some FBPs have more memory than the others,
+	 * or where LTCs have been disabled on a FBP.
+	 */
+	if (lower != total) {
+		/* The common memory amount is addressed normally. */
 		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
-				   (size - rsvd_head - rsvd_tail) >>
-				   NVKM_RAM_MM_SHIFT, 1);
+				   (lower - rsvd_head) >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
-	} else {
-		/* otherwise, address lowest common amount from 0GiB */
-		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
-				   ((bsize * parts) - rsvd_head) >>
-				   NVKM_RAM_MM_SHIFT, 1);
+
+		/* And the rest is much higher in the physical address
+		 * space, and may not be usable for certain operations.
+		 */
+		ret = nvkm_mm_init(&ram->vram, ubase >> NVKM_RAM_MM_SHIFT,
+				   (usize - rsvd_tail) >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
-
-		/* and the rest starting from (8GiB + common_size) */
-		ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
-				   NVKM_RAM_MM_SHIFT,
-				   (size - (bsize * parts) - rsvd_tail) >>
+	} else {
+		/* GPUs without mixed-memory are a lot nicer... */
+		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+				   (total - rsvd_head - rsvd_tail) >>
 				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
@@ -624,7 +643,8 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 }

 int
-gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+gf100_ram_new_(const struct nvkm_ram_func *func,
+	       struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
 	struct nvkm_subdev *subdev = &fb->subdev;
 	struct nvkm_bios *bios = subdev->device->bios;
@@ -635,7 +655,7 @@ gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 		return -ENOMEM;
 	*pram = &ram->base;

-	ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
+	ret = gf100_ram_ctor(func, fb, &ram->base);
 	if (ret)
 		return ret;
@@ -711,3 +731,23 @@ gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
 	return 0;
 }
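To make the lower/upper split concrete, consider a hypothetical mixed configuration of three FBPs reporting 1024 MiB/2 LTCs, 1024 MiB/2 LTCs and 1024 MiB/1 LTC. Then lcomm = min(1024/2, 1024/2, 1024/1) MiB = 512 MiB per LTC, ltcn = 5, and lower = 2560 MiB is addressed linearly from zero; the remaining usize = 3072 - 2560 = 512 MiB lands at ubase = lcomm + func->upper, i.e. 512 MiB + 8 GiB on gf100, whose .upper is 0x0200000000.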
static const struct nvkm_ram_func
gf100_ram = {
.upper = 0x0200000000,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf100_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.init = gf100_ram_init,
.get = gf100_ram_get,
.put = gf100_ram_put,
.calc = gf100_ram_calc,
.prog = gf100_ram_prog,
.tidy = gf100_ram_tidy,
};
int
gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
return gf100_ram_new_(&gf100_ram, fb, pram);
}
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "ram.h"
u32
gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
struct nvkm_device *device, int fbp, int *pltcs)
{
u32 fbpt = nvkm_rd32(device, 0x022438);
u32 fbpat = nvkm_rd32(device, 0x02243c);
u32 fbpas = fbpat / fbpt;
u32 fbpa = fbp * fbpas;
u32 size = 0;
while (fbpas--) {
if (!(fbpao & BIT(fbpa)))
size += func->probe_fbpa_amount(device, fbpa);
fbpa++;
}
*pltcs = 1;
return size;
}
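A worked example of the FBP-to-FBPA mapping above, with hypothetical register values: if 0x022438 reports fbpt = 2 FBPs and 0x02243c reports fbpat = 4 FBPAs, then fbpas = 2 FBPAs per FBP, so a query for fbp = 1 starts at fbpa = 2 and sums the amounts of FBPAs 2 and 3, skipping any FBPA masked off in fbpao.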
static const struct nvkm_ram_func
gf108_ram = {
.upper = 0x0200000000,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.init = gf100_ram_init,
.get = gf100_ram_get,
.put = gf100_ram_put,
.calc = gf100_ram_calc,
.prog = gf100_ram_prog,
.tidy = gf100_ram_tidy,
};
int
gf108_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
return gf100_ram_new_(&gf108_ram, fb, pram);
}
@@ -1108,7 +1108,7 @@ gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
 	return ret;
 }

-static int
+int
 gk104_ram_calc(struct nvkm_ram *base, u32 freq)
 {
 	struct gk104_ram *ram = gk104_ram(base);
@@ -1227,7 +1227,7 @@ gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
 	nvkm_mask(device, 0x10f444, mask, data);
 }

-static int
+int
 gk104_ram_prog(struct nvkm_ram *base)
 {
 	struct gk104_ram *ram = gk104_ram(base);
@@ -1247,7 +1247,7 @@ gk104_ram_prog(struct nvkm_ram *base)
 	return (ram->base.next == &ram->base.xition);
 }

-static void
+void
 gk104_ram_tidy(struct nvkm_ram *base)
 {
 	struct gk104_ram *ram = gk104_ram(base);
@@ -1509,7 +1509,7 @@ gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
 	return ret;
 }

-static void *
+void *
 gk104_ram_dtor(struct nvkm_ram *base)
 {
 	struct gk104_ram *ram = gk104_ram(base);
@@ -1522,31 +1522,14 @@ gk104_ram_dtor(struct nvkm_ram *base)
 	return ram;
 }

-static const struct nvkm_ram_func
-gk104_ram_func = {
-	.dtor = gk104_ram_dtor,
-	.init = gk104_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
-	.calc = gk104_ram_calc,
-	.prog = gk104_ram_prog,
-	.tidy = gk104_ram_tidy,
-};
-
 int
-gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
-{
-	return gk104_ram_ctor(fb, pram, 0x022554);
-}
-
-int
-gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
+gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	       struct nvkm_ram **pram)
 {
 	struct nvkm_subdev *subdev = &fb->subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_bios *bios = device->bios;
-	struct nvkm_gpio *gpio = device->gpio;
-	struct dcb_gpio_func func;
+	struct dcb_gpio_func gpio;
 	struct gk104_ram *ram;
 	int ret, i;
 	u8 ramcfg = nvbios_ramcfg_index(subdev);
@@ -1556,7 +1539,7 @@ gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
 		return -ENOMEM;
 	*pram = &ram->base;

-	ret = gf100_ram_ctor(&gk104_ram_func, fb, maskaddr, &ram->base);
+	ret = gf100_ram_ctor(func, fb, &ram->base);
 	if (ret)
 		return ret;
@@ -1614,18 +1597,18 @@ gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
 	}

 	/* lookup memory voltage gpios */
-	ret = nvkm_gpio_find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(device->gpio, 0, 0x18, DCB_GPIO_UNUSED, &gpio);
 	if (ret == 0) {
-		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
-		ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
-		ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
+		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (gpio.line * 0x04));
+		ram->fuc.r_funcMV[0] = (gpio.log[0] ^ 2) << 12;
+		ram->fuc.r_funcMV[1] = (gpio.log[1] ^ 2) << 12;
 	}

-	ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(device->gpio, 0, 0x2e, DCB_GPIO_UNUSED, &gpio);
 	if (ret == 0) {
-		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
-		ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
-		ram->fuc.r_func2E[1] = (func.log[1] ^ 2) << 12;
+		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (gpio.line * 0x04));
+		ram->fuc.r_func2E[0] = (gpio.log[0] ^ 2) << 12;
+		ram->fuc.r_func2E[1] = (gpio.log[1] ^ 2) << 12;
 	}

 	ram->fuc.r_gpiotrig = ramfuc_reg(0x00d604);
@@ -1717,3 +1700,24 @@ gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
 	ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
 	return 0;
 }
static const struct nvkm_ram_func
gk104_ram = {
.upper = 0x0200000000,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
.get = gf100_ram_get,
.put = gf100_ram_put,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
};
int
gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
return gk104_ram_new_(&gk104_ram, fb, pram);
}
@@ -23,8 +23,31 @@
  */
 #include "ram.h"

+u32
+gm107_ram_probe_fbp(const struct nvkm_ram_func *func,
+		    struct nvkm_device *device, int fbp, int *pltcs)
+{
+	u32 fbpao = nvkm_rd32(device, 0x021c14);
+	return func->probe_fbp_amount(func, fbpao, device, fbp, pltcs);
+}
+
+static const struct nvkm_ram_func
+gm107_ram = {
+	.upper = 0x1000000000,
+	.probe_fbp = gm107_ram_probe_fbp,
+	.probe_fbp_amount = gf108_ram_probe_fbp_amount,
+	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+	.dtor = gk104_ram_dtor,
+	.init = gk104_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gk104_ram_calc,
+	.prog = gk104_ram_prog,
+	.tidy = gk104_ram_tidy,
+};
+
 int
 gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	return gk104_ram_ctor(fb, pram, 0x021c14);
+	return gk104_ram_new_(&gm107_ram, fb, pram);
 }
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "ram.h"
u32
gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
struct nvkm_device *device, int fbp, int *pltcs)
{
u32 ltcs = nvkm_rd32(device, 0x022450);
u32 fbpas = nvkm_rd32(device, 0x022458);
u32 fbpa = fbp * fbpas;
u32 size = 0;
if (!(nvkm_rd32(device, 0x021d38) & BIT(fbp))) {
u32 ltco = nvkm_rd32(device, 0x021d70 + (fbp * 4));
u32 ltcm = ~ltco & ((1 << ltcs) - 1);
while (fbpas--) {
if (!(fbpao & (1 << fbpa)))
size += func->probe_fbpa_amount(device, fbpa);
fbpa++;
}
*pltcs = hweight32(ltcm);
}
return size;
}
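A worked example of the LTC accounting above, with hypothetical values: for ltcs = 4 LTCs per FBP and a disable mask ltco = 0x2 on this FBP, ltcm = ~0x2 & 0xf = 0xd, so hweight32(ltcm) reports 3 active LTCs, which is the per-FBP count gf100_ram_ctor uses to derive the common per-LTC amount.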
static const struct nvkm_ram_func
gm200_ram = {
.upper = 0x1000000000,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
.get = gf100_ram_get,
.put = gf100_ram_put,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
};
int
gm200_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
return gk104_ram_new_(&gm200_ram, fb, pram);
}
...@@ -76,8 +76,18 @@ gp100_ram_init(struct nvkm_ram *ram) ...@@ -76,8 +76,18 @@ gp100_ram_init(struct nvkm_ram *ram)
return 0; return 0;
} }
static u32
gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa)
{
return nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
}
static const struct nvkm_ram_func static const struct nvkm_ram_func
gp100_ram_func = { gp100_ram = {
.upper = 0x1000000000,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gp100_ram_probe_fbpa,
.init = gp100_ram_init,
.get = gf100_ram_get,
.put = gf100_ram_put,
@@ -87,60 +97,10 @@ int
gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_ram *ram;
- struct nvkm_subdev *subdev = &fb->subdev;
- struct nvkm_device *device = subdev->device;
- enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
- const u32 rsvd_head = ( 256 * 1024); /* vga memory */
- const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
- u32 fbpa_num = nvkm_rd32(device, 0x02243c), fbpa;
- u32 fbio_opt = nvkm_rd32(device, 0x021c14);
- u64 part, size = 0, comm = ~0ULL;
- bool mixed = false;
- int ret;
-
- nvkm_debug(subdev, "02243c: %08x\n", fbpa_num);
- nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
- for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
- if (!(fbio_opt & (1 << fbpa))) {
- part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
- nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part);
- part = part << 20;
- if (part != comm) {
- if (comm != ~0ULL)
- mixed = true;
- comm = min(comm, part);
- }
- size = size + part;
- }
- }
-
- ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram);
- *pram = ram;
- if (ret)
- return ret;
-
- nvkm_mm_fini(&ram->vram);
-
- if (mixed) {
- ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
- ((comm * fbpa_num) - rsvd_head) >>
- NVKM_RAM_MM_SHIFT, 1);
- if (ret)
- return ret;
- ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >>
- NVKM_RAM_MM_SHIFT,
- (size - (comm * fbpa_num) - rsvd_tail) >>
- NVKM_RAM_MM_SHIFT, 1);
- if (ret)
- return ret;
- } else {
- ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
- (size - rsvd_head - rsvd_tail) >>
- NVKM_RAM_MM_SHIFT, 1);
- if (ret)
- return ret;
- }
-
- return 0;
+ if (!(ram = *pram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+ return -ENOMEM;
+
+ return gf100_ram_ctor(&gp100_ram, fb, ram);
}
@@ -134,7 +134,7 @@ struct anx9805_aux {
static int
anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
- u8 type, u32 addr, u8 *data, u8 size)
+ u8 type, u32 addr, u8 *data, u8 *size)
{
struct anx9805_aux *aux = anx9805_aux(base);
struct anx9805_pad *pad = aux->pad;
@@ -143,7 +143,7 @@ anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
u8 buf[16] = {};
u8 tmp;

- AUX_DBG(&aux->base, "%02x %05x %d", type, addr, size);
+ AUX_DBG(&aux->base, "%02x %05x %d", type, addr, *size);

tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x04;
nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x04);
@@ -152,12 +152,12 @@ anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
nvkm_wri2cr(adap, aux->addr, 0xe4, 0x80);
if (!(type & 1)) {
- memcpy(buf, data, size);
+ memcpy(buf, data, *size);
AUX_DBG(&aux->base, "%16ph", buf);
- for (i = 0; i < size; i++)
+ for (i = 0; i < *size; i++)
nvkm_wri2cr(adap, aux->addr, 0xf0 + i, buf[i]);
}
- nvkm_wri2cr(adap, aux->addr, 0xe5, ((size - 1) << 4) | type);
+ nvkm_wri2cr(adap, aux->addr, 0xe5, ((*size - 1) << 4) | type);
nvkm_wri2cr(adap, aux->addr, 0xe6, (addr & 0x000ff) >> 0);
nvkm_wri2cr(adap, aux->addr, 0xe7, (addr & 0x0ff00) >> 8);
nvkm_wri2cr(adap, aux->addr, 0xe8, (addr & 0xf0000) >> 16);
@@ -176,10 +176,10 @@ anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
}

if (type & 1) {
- for (i = 0; i < size; i++)
+ for (i = 0; i < *size; i++)
buf[i] = nvkm_rdi2cr(adap, aux->addr, 0xf0 + i);
AUX_DBG(&aux->base, "%16ph", buf);
- memcpy(data, buf, size);
+ memcpy(data, buf, *size);
}

ret = 0;
@@ -51,7 +51,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */

- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, cnt);
+ ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
if (ret < 0) {
nvkm_i2c_aux_release(aux);
return ret;
@@ -115,7 +115,7 @@ nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
int
nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
- u32 addr, u8 *data, u8 size)
+ u32 addr, u8 *data, u8 *size)
{
return aux->func->xfer(aux, retry, type, addr, data, size);
}
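/*
 * Editor's sketch (assumption): with xfer() now taking "u8 *size", the
 * requested length goes in and the length actually transferred (read
 * back from the status register in the hardware paths below) comes back
 * out, so callers must handle short transfers. The function name, the
 * error policy, and the use of transaction type 9 for a native AUX read
 * are illustrative assumptions.
 */
static int
example_aux_rd(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 want)
{
	u8 size = want;	/* in: requested length, out: bytes transferred */
	int ret;

	ret = nvkm_i2c_aux_xfer(aux, true, 9 /* native AUX read */, addr,
				data, &size);
	if (ret < 0)
		return ret;
	if (size < want)
		return -EIO;	/* short read: caller may retry the rest */
	return 0;
}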
@@ -4,7 +4,7 @@
struct nvkm_i2c_aux_func {
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
- u32 addr, u8 *data, u8 size);
+ u32 addr, u8 *data, u8 *size);
int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
bool enhanced_framing);
};
@@ -15,7 +15,7 @@ int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
int id, struct nvkm_i2c_aux **);
void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
- u32 addr, u8 *data, u8 size);
+ u32 addr, u8 *data, u8 *size);
int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
@@ -74,7 +74,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux)
static int
g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
- u8 type, u32 addr, u8 *data, u8 size)
+ u8 type, u32 addr, u8 *data, u8 *size)
{
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
@@ -83,7 +83,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u32 xbuf[4] = {};
int ret, i;

- AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+ AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, *size);

ret = g94_i2c_aux_init(aux);
if (ret < 0)
@@ -97,7 +97,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}

if (!(type & 1)) {
- memcpy(xbuf, data, size);
+ memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
nvkm_wr32(device, 0x00e4c0 + base + i, xbuf[i / 4]);
@@ -107,7 +107,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
ctrl &= ~0x0001f0ff;
ctrl |= type << 12;
- ctrl |= size - 1;
+ ctrl |= *size - 1;
nvkm_wr32(device, 0x00e4e0 + base, addr);

/* (maybe) retry transaction a number of times on failure... */
@@ -151,7 +151,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
xbuf[i / 4] = nvkm_rd32(device, 0x00e4d0 + base + i);
AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
}
- memcpy(data, xbuf, size);
+ memcpy(data, xbuf, *size);
+ *size = stat & 0x0000001f;
}

out:
@@ -74,7 +74,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
static int
gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
- u8 type, u32 addr, u8 *data, u8 size)
+ u8 type, u32 addr, u8 *data, u8 *size)
{
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
@@ -83,7 +83,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u32 xbuf[4] = {};
int ret, i;

- AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+ AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, *size);

ret = gm200_i2c_aux_init(aux);
if (ret < 0)
@@ -97,7 +97,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}

if (!(type & 1)) {
- memcpy(xbuf, data, size);
+ memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
nvkm_wr32(device, 0x00d930 + base + i, xbuf[i / 4]);
@@ -107,7 +107,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
ctrl = nvkm_rd32(device, 0x00d954 + base);
ctrl &= ~0x0001f0ff;
ctrl |= type << 12;
- ctrl |= size - 1;
+ ctrl |= *size - 1;
nvkm_wr32(device, 0x00d950 + base, addr);

/* (maybe) retry transaction a number of times on failure... */
@@ -151,7 +151,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
xbuf[i / 4] = nvkm_rd32(device, 0x00d940 + base + i);
AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
}
- memcpy(data, xbuf, size);
+ memcpy(data, xbuf, *size);
+ *size = stat & 0x0000001f;
}

out:
@@ -30,7 +30,7 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
- nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
}
@@ -41,7 +41,7 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
- nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
}
@@ -52,7 +52,7 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
- nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
}
@@ -30,7 +30,7 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
- nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
}
@@ -41,7 +41,7 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
- nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
}
@@ -52,7 +52,7 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
- nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
}
@@ -23,6 +23,7 @@
 */
#include "priv.h"

+ #include <core/msgqueue.h>
#include <subdev/timer.h>

void
@@ -85,6 +86,7 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu)
);

/* Reset. */
+ if (pmu->func->reset)
pmu->func->reset(pmu);

/* Wait for IMEM/DMEM scrubbing to be complete. */
@@ -113,10 +115,18 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
return ret;
}
static int
nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
}
static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
nvkm_msgqueue_del(&pmu->queue);
nvkm_falcon_del(&pmu->falcon);
return nvkm_pmu(subdev);
}
@@ -125,6 +135,7 @@ static const struct nvkm_subdev_func
nvkm_pmu = {
.dtor = nvkm_pmu_dtor,
.preinit = nvkm_pmu_preinit,
.oneinit = nvkm_pmu_oneinit,
.init = nvkm_pmu_init,
.fini = nvkm_pmu_fini,
.intr = nvkm_pmu_intr,
@@ -138,7 +149,7 @@ nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
pmu->func = func;
INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
init_waitqueue_head(&pmu->recv.wait);
- return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
+ return 0;
}

int
@@ -20,15 +20,30 @@
 * DEALINGS IN THE SOFTWARE.
 */

#include <engine/falcon.h>
#include <core/msgqueue.h>
#include "priv.h"

static void
gm20b_pmu_recv(struct nvkm_pmu *pmu)
{
nvkm_msgqueue_recv(pmu->queue);
}

static const struct nvkm_pmu_func
gm20b_pmu = {
- .reset = gt215_pmu_reset,
+ .intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
};

int
gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
+ int ret;
+
+ ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
+ if (ret)
+ return ret;
+
+ return 0;
}
nvkm-y += nvkm/subdev/secboot/base.o
nvkm-y += nvkm/subdev/secboot/hs_ucode.o
nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o
nvkm-y += nvkm/subdev/secboot/acr.o
nvkm-y += nvkm/subdev/secboot/acr_r352.o
nvkm-y += nvkm/subdev/secboot/acr_r361.o
nvkm-y += nvkm/subdev/secboot/acr_r364.o
nvkm-y += nvkm/subdev/secboot/acr_r367.o
nvkm-y += nvkm/subdev/secboot/acr_r375.o
nvkm-y += nvkm/subdev/secboot/gm200.o
nvkm-y += nvkm/subdev/secboot/gm20b.o
nvkm-y += nvkm/subdev/secboot/gp102.o
@@ -37,12 +37,10 @@ struct nvkm_acr_func {
void (*dtor)(struct nvkm_acr *);
int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *);
int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
- int (*load)(struct nvkm_acr *, struct nvkm_secboot *,
+ int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
struct nvkm_gpuobj *, u64);
int (*reset)(struct nvkm_acr *, struct nvkm_secboot *,
enum nvkm_secboot_falcon);
- int (*start)(struct nvkm_acr *, struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
};

/**
@@ -50,7 +48,7 @@ struct nvkm_acr_func {
 *
 * @boot_falcon: ID of the falcon that will perform secure boot
 * @managed_falcons: bitfield of falcons managed by this ACR
- * @start_address: virtual start address of the HS bootloader
+ * @optional_falcons: bitfield of falcons we can live without
 */
struct nvkm_acr {
const struct nvkm_acr_func *func;
@@ -58,12 +56,15 @@ struct nvkm_acr {
enum nvkm_secboot_falcon boot_falcon;
unsigned long managed_falcons;
- u32 start_address;
+ unsigned long optional_falcons;
};

void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t);

struct nvkm_acr *acr_r352_new(unsigned long);
struct nvkm_acr *acr_r361_new(unsigned long);
struct nvkm_acr *acr_r364_new(unsigned long);
struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);

#endif
@@ -21,35 +21,16 @@
 */

#include "acr_r352.h"
#include "hs_ucode.h"

#include <core/gpuobj.h>
#include <core/firmware.h>
#include <engine/falcon.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/pmu.h>
#include <core/msgqueue.h>
#include <engine/sec2.h>

- /**
-  * struct hsf_fw_header - HS firmware descriptor
-  * @sig_dbg_offset: offset of the debug signature
-  * @sig_dbg_size: size of the debug signature
-  * @sig_prod_offset: offset of the production signature
-  * @sig_prod_size: size of the production signature
-  * @patch_loc: offset of the offset (sic) of where the signature is
-  * @patch_sig: offset of the offset (sic) to add to sig_*_offset
-  * @hdr_offset: offset of the load header (see struct hs_load_header)
-  * @hdr_size: size of above header
-  *
-  * This structure is embedded in the HS firmware image at
-  * hs_bin_hdr.header_offset.
-  */
- struct hsf_fw_header {
- u32 sig_dbg_offset;
- u32 sig_dbg_size;
- u32 sig_prod_offset;
- u32 sig_prod_size;
- u32 patch_loc;
- u32 patch_sig;
- u32 hdr_offset;
- u32 hdr_size;
- };
/**
 * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
@@ -95,15 +76,14 @@ struct acr_r352_flcn_bl_desc {
 */
static void
acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *_img, u64 wpr_addr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
- struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
struct acr_r352_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &_img->ucode_desc;
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
u64 base, addr_code, addr_data;

- base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset;
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = (base + pdesc->app_resident_code_offset) >> 8;
addr_data = (base + pdesc->app_resident_data_offset) >> 8;
@@ -166,6 +146,96 @@ struct hsflcn_acr_desc {
 * Low-secure blob creation
 */
/**
* struct acr_r352_lsf_lsb_header - LS firmware header
* @signature: signature to verify the firmware against
* @ucode_off: offset of the ucode blob in the WPR region. The ucode
* blob contains the bootloader, code and data of the
* LS falcon
* @ucode_size: size of the ucode blob, including bootloader
* @data_size: size of the ucode blob data
* @bl_code_size: size of the bootloader code
* @bl_imem_off: offset in imem of the bootloader
* @bl_data_off: offset of the bootloader data in WPR region
* @bl_data_size: size of the bootloader data
* @app_code_off: offset of the app code relative to ucode_off
* @app_code_size: size of the app code
* @app_data_off: offset of the app data relative to ucode_off
* @app_data_size: size of the app data
* @flags: flags for the secure bootloader
*
* This structure is written into the WPR region for each managed falcon. Each
* instance is referenced by the lsb_offset member of the corresponding
* lsf_wpr_header.
*/
struct acr_r352_lsf_lsb_header {
/**
* LS falcon signatures
* @prd_keys: signature to use in production mode
* @dgb_keys: signature to use in debug mode
* @b_prd_present: whether the production key is present
* @b_dgb_present: whether the debug key is present
* @falcon_id: ID of the falcon the ucode applies to
*/
struct {
u8 prd_keys[2][16];
u8 dbg_keys[2][16];
u32 b_prd_present;
u32 b_dbg_present;
u32 falcon_id;
} signature;
u32 ucode_off;
u32 ucode_size;
u32 data_size;
u32 bl_code_size;
u32 bl_imem_off;
u32 bl_data_off;
u32 bl_data_size;
u32 app_code_off;
u32 app_code_size;
u32 app_data_off;
u32 app_data_size;
u32 flags;
};
/**
* struct acr_r352_lsf_wpr_header - LS blob WPR Header
* @falcon_id: LS falcon ID
* @lsb_offset: offset of the lsb_lsf_header in the WPR region
* @bootstrap_owner: secure falcon reponsible for bootstrapping the LS falcon
* @lazy_bootstrap: skip bootstrapping by ACR
* @status: bootstrapping status
*
* An array of these is written at the beginning of the WPR region, one for
* each managed falcon. The array is terminated by an instance which falcon_id
* is LSF_FALCON_ID_INVALID.
*/
struct acr_r352_lsf_wpr_header {
u32 falcon_id;
u32 lsb_offset;
u32 bootstrap_owner;
u32 lazy_bootstrap;
u32 status;
#define LSF_IMAGE_STATUS_NONE 0
#define LSF_IMAGE_STATUS_COPY 1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
};
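/*
 * Editor's sketch (assumption): the wpr_header array above is written at
 * the very start of the WPR region, one entry per managed falcon, and is
 * terminated by an entry whose falcon_id is LSF_FALCON_ID_INVALID; the
 * terminator value 0xffffffff below is an assumption, as the define lives
 * elsewhere in secboot. A consumer walks the array like so:
 */
static void
example_walk_wpr_headers(const struct acr_r352_lsf_wpr_header *whdr)
{
	while (whdr->falcon_id != 0xffffffff /* LSF_FALCON_ID_INVALID */) {
		/* whdr->lsb_offset locates this falcon's LSB header */
		whdr++;
	}
}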
/**
* struct ls_ucode_img_r352 - ucode image augmented with r352 headers
*/
struct ls_ucode_img_r352 {
struct ls_ucode_img base;
struct acr_r352_lsf_wpr_header wpr_header;
struct acr_r352_lsf_lsb_header lsb_header;
};
#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
/**
 * ls_ucode_img_load() - create a lsf_ucode_img and load it
 */
@@ -255,7 +325,7 @@ acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
 * image size
 */
offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
- lhdr->ucode_off = offset;
+ _img->ucode_off = lhdr->ucode_off = offset;
offset += _img->ucode_size;

/*
@@ -341,7 +411,7 @@ acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
 */
int
acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
- struct nvkm_gpuobj *wpr_blob, u32 wpr_addr)
+ struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
struct ls_ucode_img *_img;
u32 pos = 0;
@@ -381,8 +451,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
return 0;
}

- /* Both size and address of WPR need to be 128K-aligned */
- #define WPR_ALIGNMENT 0x20000
+ /* Both size and address of WPR need to be 256K-aligned */
+ #define WPR_ALIGNMENT 0x40000
/**
 * acr_r352_prepare_ls_blob() - prepare the LS blob
 *
@@ -399,7 +469,7 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
struct ls_ucode_img *img, *t;
unsigned long managed_falcons = acr->base.managed_falcons;
int managed_count = 0;
- u32 image_wpr_size;
+ u32 image_wpr_size, ls_blob_size;
int falcon_id;
int ret;
@@ -411,6 +481,12 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
img = acr->func->ls_ucode_img_load(acr, falcon_id);
if (IS_ERR(img)) {
if (acr->base.optional_falcons & BIT(falcon_id)) {
managed_falcons &= ~BIT(falcon_id);
nvkm_info(subdev, "skipping %s falcon...\n",
nvkm_secboot_falcon_name[falcon_id]);
continue;
}
ret = PTR_ERR(img);
goto cleanup;
}
@@ -419,6 +495,24 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
managed_count++;
}
/* Commit the actual list of falcons we will manage from now on */
acr->base.managed_falcons = managed_falcons;
/*
 * If the boot falcon has a firmware, let it manage the bootstrap of other
* falcons.
*/
if (acr->func->ls_func[acr->base.boot_falcon] &&
(managed_falcons & BIT(acr->base.boot_falcon))) {
for_each_set_bit(falcon_id, &managed_falcons,
NVKM_SECBOOT_FALCON_END) {
if (falcon_id == acr->base.boot_falcon)
continue;
acr->lazy_bootstrap |= BIT(falcon_id);
}
}
/*
 * Fill the WPR and LSF headers with the right offsets and compute
 * required WPR size
@@ -426,8 +520,17 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
ls_blob_size = image_wpr_size;
/*
* If we need a shadow area, allocate twice the size and use the
* upper half as WPR
*/
if (wpr_size == 0 && acr->func->shadow_blob)
ls_blob_size *= 2;
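/*
 * Editor's note (inferred from this hunk, not from the original commit
 * text): with a shadow blob the GPU object is 2 * image_wpr_size; the
 * lower half holds the shadow copy, and the upper half, starting at
 * ls_blob->addr + ls_blob->size / 2 as set below, becomes the WPR
 * region proper.
 */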
/* Allocate GPU object that will contain the WPR region */
- ret = nvkm_gpuobj_new(subdev->device, image_wpr_size, WPR_ALIGNMENT,
+ ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
false, NULL, &acr->ls_blob);
if (ret)
goto cleanup;
@@ -438,6 +541,9 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
/* If WPR address and size are not fixed, set them to fit the LS blob */
if (wpr_size == 0) {
wpr_addr = acr->ls_blob->addr;
if (acr->func->shadow_blob)
wpr_addr += acr->ls_blob->size / 2;
wpr_size = image_wpr_size;
/*
 * But if the WPR region is set by the bootloader, it is illegal for
@@ -469,41 +575,17 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
- /**
-  * acr_r352_hsf_patch_signature() - patch HS blob with correct signature
-  */
- static void
- acr_r352_hsf_patch_signature(struct nvkm_secboot *sb, void *acr_image)
- {
- struct fw_bin_header *hsbin_hdr = acr_image;
- struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
- void *hs_data = acr_image + hsbin_hdr->data_offset;
- void *sig;
- u32 sig_size;
-
- /* Falcon in debug or production mode? */
- if (sb->boot_falcon->debug) {
- sig = acr_image + fw_hdr->sig_dbg_offset;
- sig_size = fw_hdr->sig_dbg_size;
- } else {
- sig = acr_image + fw_hdr->sig_prod_offset;
- sig_size = fw_hdr->sig_prod_size;
- }
-
- /* Patch signature */
- memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
- }
-
- static void
+ void
acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- struct hsflcn_acr_desc *desc)
+ void *_desc)
{
+ struct hsflcn_acr_desc *desc = _desc;
struct nvkm_gpuobj *ls_blob = acr->ls_blob;
/* WPR region information if WPR is not fixed */
if (sb->wpr_size == 0) {
- u32 wpr_start = ls_blob->addr;
- u32 wpr_end = wpr_start + ls_blob->size;
+ u64 wpr_start = ls_blob->addr;
+ u64 wpr_end = wpr_start + ls_blob->size;

desc->wpr_region_id = 1;
desc->regions.no_regions = 2;
@@ -533,8 +615,8 @@ acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
bl_desc->code_dma_base = lower_32_bits(addr_code);
bl_desc->non_sec_code_off = hdr->non_sec_code_off;
bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hdr->app[0].sec_code_off;
- bl_desc->sec_code_size = hdr->app[0].sec_code_size;
+ bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
+ bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
bl_desc->code_entry_point = 0;
bl_desc->data_dma_base = lower_32_bits(addr_data);
bl_desc->data_size = hdr->data_size;
@@ -562,7 +644,7 @@ acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
void *acr_data;
int ret;

- acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
+ acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
if (IS_ERR(acr_image))
return PTR_ERR(acr_image);
@@ -571,15 +653,12 @@ acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
load_hdr = acr_image + fw_hdr->hdr_offset;
acr_data = acr_image + hsbin_hdr->data_offset;

- /* Patch signature */
- acr_r352_hsf_patch_signature(sb, acr_image);
-
/* Patch descriptor with WPR information? */
if (patch) {
struct hsflcn_acr_desc *desc;

desc = acr_data + load_hdr->data_dma_base;
- acr_r352_fixup_hs_desc(acr, sb, desc);
+ acr->func->fixup_hs_desc(acr, sb, desc);
}
if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
@@ -589,7 +668,7 @@ acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
goto cleanup;
}
memcpy(load_header, load_hdr, sizeof(*load_header) +
- (sizeof(load_hdr->app[0]) * load_hdr->num_apps));
+ (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));

/* Create ACR blob and copy HS data to it */
ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
@@ -607,30 +686,6 @@ acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
return ret;
}
static int
acr_r352_prepare_hsbl_blob(struct acr_r352 *acr)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
struct fw_bin_header *hdr;
struct fw_bl_desc *hsbl_desc;
acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
if (IS_ERR(acr->hsbl_blob)) {
int ret = PTR_ERR(acr->hsbl_blob);
acr->hsbl_blob = NULL;
return ret;
}
hdr = acr->hsbl_blob;
hsbl_desc = acr->hsbl_blob + hdr->header_offset;
/* virtual start address for boot vector */
acr->base.start_address = hsbl_desc->start_tag << 8;
return 0;
}
/**
 * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
 *
@@ -641,6 +696,7 @@ acr_r352_prepare_hsbl_blob(struct acr_r352 *acr)
int
acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
struct nvkm_subdev *subdev = &sb->subdev;
int ret;

/* Firmware already loaded? */
@@ -672,10 +728,25 @@ acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
/* Load the HS firmware bootloader */
if (!acr->hsbl_blob) {
- ret = acr_r352_prepare_hsbl_blob(acr);
- if (ret)
+ acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
+ if (IS_ERR(acr->hsbl_blob)) {
+ ret = PTR_ERR(acr->hsbl_blob);
+ acr->hsbl_blob = NULL;
+ return ret;
+ }
+
+ if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
+ acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
+ "acr/unload_bl", 0);
+ if (IS_ERR(acr->hsbl_unload_blob)) {
+ ret = PTR_ERR(acr->hsbl_unload_blob);
+ acr->hsbl_unload_blob = NULL;
return ret;
+ }
+ } else {
+ acr->hsbl_unload_blob = acr->hsbl_blob;
+ }
}

acr->firmware_ok = true;
nvkm_debug(&sb->subdev, "LS blob successfully created\n");
@@ -684,35 +755,42 @@ acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
}

/**
- * acr_r352_load() - prepare HS falcon to run the specified blob, mapped
- * at GPU address offset.
+ * acr_r352_load() - prepare HS falcon to run the specified blob, mapped.
+ *
+ * Returns the start address to use, or a negative error value.
 */
static int
- acr_r352_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
+ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
struct nvkm_gpuobj *blob, u64 offset)
{
struct acr_r352 *acr = acr_r352(_acr);
- struct nvkm_falcon *falcon = sb->boot_falcon;
- struct fw_bin_header *hdr = acr->hsbl_blob;
- struct fw_bl_desc *hsbl_desc = acr->hsbl_blob + hdr->header_offset;
- void *blob_data = acr->hsbl_blob + hdr->data_offset;
- void *hsbl_code = blob_data + hsbl_desc->code_off;
- void *hsbl_data = blob_data + hsbl_desc->data_off;
- u32 code_size = ALIGN(hsbl_desc->code_size, 256);
- const struct hsf_load_header *load_hdr;
const u32 bl_desc_size = acr->func->hs_bl_desc_size;
+ const struct hsf_load_header *load_hdr;
+ struct fw_bin_header *bl_hdr;
+ struct fw_bl_desc *hsbl_desc;
+ void *bl, *blob_data, *hsbl_code, *hsbl_data;
+ u32 code_size;
u8 bl_desc[bl_desc_size];

/* Find the bootloader descriptor for our blob and copy it */
if (blob == acr->load_blob) {
load_hdr = &acr->load_bl_header;
+ bl = acr->hsbl_blob;
} else if (blob == acr->unload_blob) {
load_hdr = &acr->unload_bl_header;
+ bl = acr->hsbl_unload_blob;
} else {
nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
return -EINVAL;
}

bl_hdr = bl;
hsbl_desc = bl + bl_hdr->header_offset;
blob_data = bl + bl_hdr->data_offset;
hsbl_code = blob_data + hsbl_desc->code_off;
hsbl_data = blob_data + hsbl_desc->data_off;
code_size = ALIGN(hsbl_desc->code_size, 256);

/*
 * Copy HS bootloader data
 */
@@ -732,23 +810,32 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
bl_desc_size, 0);

- return 0;
+ return hsbl_desc->start_tag << 8;
}
static int
acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
struct nvkm_subdev *subdev = &sb->subdev;
int i;

/* Run the unload blob to unprotect the WPR region */
if (acr->unload_blob && sb->wpr_set) {
int ret;

- nvkm_debug(&sb->subdev, "running HS unload blob\n");
- ret = sb->func->run_blob(sb, acr->unload_blob);
- if (ret)
+ nvkm_debug(subdev, "running HS unload blob\n");
+ ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
+ if (ret < 0)
return ret;
- nvkm_debug(&sb->subdev, "HS unload blob completed\n");
+ /*
+  * Unload blob will return this error code - it is not an error
+  * and the expected behavior on RM as well
+  */
+ if (ret && ret != 0x1d) {
+ nvkm_error(subdev, "HS unload failed, ret 0x%08x", ret);
+ return -EINVAL;
+ }
+ nvkm_debug(subdev, "HS unload blob completed\n");
}

for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
@@ -759,9 +846,44 @@ acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
return 0;
}
/**
* Check if the WPR region has been indeed set by the ACR firmware, and
* matches where it should be.
*/
static bool
acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
{
const struct nvkm_subdev *subdev = &sb->subdev;
const struct nvkm_device *device = subdev->device;
u64 wpr_lo, wpr_hi;
u64 wpr_range_lo, wpr_range_hi;
nvkm_wr32(device, 0x100cd4, 0x2);
wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
wpr_lo <<= 8;
nvkm_wr32(device, 0x100cd4, 0x3);
wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
wpr_hi <<= 8;
if (sb->wpr_size != 0) {
wpr_range_lo = sb->wpr_addr;
wpr_range_hi = wpr_range_lo + sb->wpr_size;
} else {
wpr_range_lo = acr->ls_blob->addr;
wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
}
return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
}
static int
acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
const struct nvkm_subdev *subdev = &sb->subdev;
unsigned long managed_falcons = acr->base.managed_falcons;
u32 reg;
int falcon_id;
int ret;

if (sb->wpr_set)
@@ -772,40 +894,95 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
if (ret)
return ret;
nvkm_debug(&sb->subdev, "running HS load blob\n"); nvkm_debug(subdev, "running HS load blob\n");
ret = sb->func->run_blob(sb, acr->load_blob); ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
/* clear halt interrupt */ /* clear halt interrupt */
nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10); nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
if (ret) sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
if (ret < 0) {
return ret; return ret;
nvkm_debug(&sb->subdev, "HS load blob completed\n"); } else if (ret > 0) {
nvkm_error(subdev, "HS load failed, ret 0x%08x", ret);
return -EINVAL;
}
nvkm_debug(subdev, "HS load blob completed\n");
/* WPR must be set at this point */
if (!sb->wpr_set) {
nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
return -EINVAL;
}
/* Run LS firmwares post_run hooks */
for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
const struct acr_r352_ls_func *func =
acr->func->ls_func[falcon_id];
sb->wpr_set = true; if (func->post_run)
func->post_run(&acr->base, sb);
}
/* Re-start ourselves if we are managed */
if (!nvkm_secboot_is_managed(sb, acr->base.boot_falcon))
return 0;
/* Enable interrupts */
nvkm_falcon_wr32(sb->boot_falcon, 0x10, 0xff);
nvkm_mc_intr_mask(subdev->device, sb->boot_falcon->owner->index, true);
/* Start LS firmware on boot falcon */
nvkm_falcon_start(sb->boot_falcon);
/*
* There is a bug where the LS firmware sometimes require to be started
* twice (this happens only on SEC). Detect and workaround that
* condition.
*
* Once started, the falcon will end up in STOPPED condition (bit 5)
* if successful, or in HALT condition (bit 4) if not.
*/
nvkm_msec(subdev->device, 1,
if ((reg = nvkm_rd32(subdev->device,
sb->boot_falcon->addr + 0x100)
& 0x30) != 0)
break;
);
if (reg & BIT(4)) {
nvkm_debug(subdev, "applying workaround for start bug...");
nvkm_falcon_start(sb->boot_falcon);
nvkm_msec(subdev->device, 1,
if ((reg = nvkm_rd32(subdev->device,
sb->boot_falcon->addr + 0x100)
& 0x30) != 0)
break;
);
if (reg & BIT(4)) {
nvkm_error(subdev, "%s failed to start\n",
nvkm_secboot_falcon_name[acr->base.boot_falcon]);
return -EINVAL;
}
}
nvkm_debug(subdev, "%s started\n",
nvkm_secboot_falcon_name[acr->base.boot_falcon]);
return 0;
}
- /*
-  * acr_r352_reset() - execute secure boot from the prepared state
-  *
-  * Load the HS bootloader and ask the falcon to run it. This will in turn
-  * load the HS firmware and run it, so once the falcon stops all the managed
-  * falcons should have their LS firmware loaded and be ready to run.
-  */
+ /**
+  * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
+  *
+  * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
+  * disabled. This has the effect of making all managed falcons ready-to-run.
+  */
static int
- acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
+ acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon)
{
- struct acr_r352 *acr = acr_r352(_acr);
int ret;

/*
-  * Dummy GM200 implementation: perform secure boot each time we are
-  * called on FECS. Since only FECS and GPCCS are managed and started
-  * together, this ought to be safe.
-  *
-  * Once we have proper PMU firmware and support, this will be changed
-  * to a proper call to the PMU method.
+  * Perform secure boot each time we are called on FECS. Since only FECS
+  * and GPCCS are managed and started together, this ought to be safe.
 */
if (falcon != NVKM_SECBOOT_FALCON_FECS)
goto end;
@@ -814,7 +991,7 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
if (ret)
return ret;

- acr_r352_bootstrap(acr, sb);
+ ret = acr_r352_bootstrap(acr, sb);
if (ret)
return ret;
@@ -823,28 +1000,57 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
return 0;
}
/*
* acr_r352_reset() - execute secure boot from the prepared state
*
* Load the HS bootloader and ask the falcon to run it. This will in turn
* load the HS firmware and run it, so once the falcon stops all the managed
* falcons should have their LS firmware loaded and be ready to run.
*/
static int
- acr_r352_start(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
+ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon)
{
struct acr_r352 *acr = acr_r352(_acr);
- const struct nvkm_subdev *subdev = &sb->subdev;
- int base;
+ struct nvkm_msgqueue *queue;
+ const char *fname = nvkm_secboot_falcon_name[falcon];
+ bool wpr_already_set = sb->wpr_set;
+ int ret;
+
+ /* Make sure secure boot is performed */
+ ret = acr_r352_bootstrap(acr, sb);
+ if (ret)
+ return ret;

- switch (falcon) {
- case NVKM_SECBOOT_FALCON_FECS:
- base = 0x409000;
- break;
- case NVKM_SECBOOT_FALCON_GPCCS:
- base = 0x41a000;
- break;
- default:
- nvkm_error(subdev, "cannot start unhandled falcon!\n");
- return -EINVAL;
- }
-
- nvkm_wr32(subdev->device, base + 0x130, 0x00000002);
- acr->falcon_state[falcon] = RUNNING;
+ /* No PMU interface? */
+ if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
+ /* Redo secure boot entirely if it was already done */
+ if (wpr_already_set)
+ return acr_r352_reset_nopmu(acr, sb, falcon);
+ /* Else return the result of the initial invocation */
+ else
+ return ret;
+ }
+
+ switch (_acr->boot_falcon) {
+ case NVKM_SECBOOT_FALCON_PMU:
+ queue = sb->subdev.device->pmu->queue;
+ break;
+ case NVKM_SECBOOT_FALCON_SEC2:
+ queue = sb->subdev.device->sec2->queue;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Otherwise just ask the LS firmware to reset the falcon */
+ nvkm_debug(&sb->subdev, "resetting %s falcon\n", fname);
+ ret = nvkm_msgqueue_acr_boot_falcon(queue, falcon);
+ if (ret) {
+ nvkm_error(&sb->subdev, "cannot boot %s falcon\n", fname);
+ return ret;
+ }
+ nvkm_debug(&sb->subdev, "falcon %s reset\n", fname);

return 0;
}
@@ -864,6 +1070,8 @@ acr_r352_dtor(struct nvkm_acr *_acr)
nvkm_gpuobj_del(&acr->unload_blob);

if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
kfree(acr->hsbl_unload_blob);
kfree(acr->hsbl_blob);
nvkm_gpuobj_del(&acr->load_blob);
nvkm_gpuobj_del(&acr->ls_blob);
@@ -887,8 +1095,88 @@ acr_r352_ls_gpccs_func = {
.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
/**
* struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
* @dma_idx: DMA context to be used by BL while loading code/data
* @code_dma_base: 256B-aligned Physical FB Address where code is located
* @total_code_size: total size of the code part in the ucode
* @code_size_to_load: size of the code part to load in PMU IMEM.
* @code_entry_point: entry point in the code.
* @data_dma_base: Physical FB address where data part of ucode is located
* @data_size: Total size of the data portion.
* @overlay_dma_base: Physical Fb address for resident code present in ucode
* @argc: Total number of args
* @argv: offset where args are copied into PMU's DMEM.
*
* Structure used by the PMU bootloader to load the rest of the code
*/
struct acr_r352_pmu_bl_desc {
u32 dma_idx;
u32 code_dma_base;
u32 code_size_total;
u32 code_size_to_load;
u32 code_entry_point;
u32 data_dma_base;
u32 data_size;
u32 overlay_dma_base;
u32 argc;
u32 argv;
u16 code_dma_base1;
u16 data_dma_base1;
u16 overlay_dma_base1;
};
/**
* acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image
*
*/
static void
acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
struct acr_r352_pmu_bl_desc *desc = _desc;
u64 base;
u64 addr_code;
u64 addr_data;
u32 addr_args;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = (base + pdesc->app_resident_code_offset) >> 8;
addr_data = (base + pdesc->app_resident_data_offset) >> 8;
addr_args = pmu->falcon->data.limit;
addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
desc->dma_idx = FALCON_DMAIDX_UCODE;
desc->code_dma_base = lower_32_bits(addr_code);
desc->code_dma_base1 = upper_32_bits(addr_code);
desc->code_size_total = pdesc->app_size;
desc->code_size_to_load = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = lower_32_bits(addr_data);
desc->data_dma_base1 = upper_32_bits(addr_data);
desc->data_size = pdesc->app_resident_data_size;
desc->overlay_dma_base = lower_32_bits(addr_code);
desc->overlay_dma_base1 = upper_32_bits(addr_code);
desc->argc = 1;
desc->argv = addr_args;
}
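/*
 * Editor's note (inferred from the descriptor and function above, not
 * from the original commit text): DMA addresses are pre-shifted right by
 * 8 (256-byte units) and split into a 32-bit low word plus a 16-bit
 * "_base1" high word, e.g. a base of 0x1_2345_6700 yields addr_code ==
 * 0x01234567 with code_dma_base1 == 0. The argc/argv pair points the
 * PMU at the command line copied to the end of its DMEM (data.limit -
 * NVKM_MSGQUEUE_CMDLINE_SIZE above).
 */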
static const struct acr_r352_ls_func
acr_r352_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
.generate_bl_desc = acr_r352_generate_pmu_bl_desc,
.bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
.post_run = acr_ls_pmu_post_run,
};
const struct acr_r352_func
acr_r352_func = {
.fixup_hs_desc = acr_r352_fixup_hs_desc,
.generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
@@ -897,6 +1185,7 @@ acr_r352_func = {
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
},
};
@@ -906,7 +1195,6 @@ acr_r352_base_func = {
.fini = acr_r352_fini,
.load = acr_r352_load,
.reset = acr_r352_reset,
- .start = acr_r352_start,
};
struct nvkm_acr *
@@ -915,6 +1203,13 @@ acr_r352_new_(const struct acr_r352_func *func,
unsigned long managed_falcons)
{
struct acr_r352 *acr;
int i;

/* Check that all requested falcons are supported */
for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
if (!func->ls_func[i])
return ERR_PTR(-ENOTSUPP);
}

acr = kzalloc(sizeof(*acr), GFP_KERNEL);
if (!acr)
@@ -24,131 +24,27 @@
#include "acr.h"
#include "ls_ucode.h"
#include "hs_ucode.h"

struct ls_ucode_img;

#define ACR_R352_MAX_APPS 8
/*
*
* LS blob structures
*
*/
/**
* struct acr_r352_lsf_lsb_header - LS firmware header
* @signature: signature to verify the firmware against
* @ucode_off: offset of the ucode blob in the WPR region. The ucode
* blob contains the bootloader, code and data of the
* LS falcon
* @ucode_size: size of the ucode blob, including bootloader
* @data_size: size of the ucode blob data
* @bl_code_size: size of the bootloader code
* @bl_imem_off: offset in imem of the bootloader
* @bl_data_off: offset of the bootloader data in WPR region
* @bl_data_size: size of the bootloader data
* @app_code_off: offset of the app code relative to ucode_off
* @app_code_size: size of the app code
* @app_data_off: offset of the app data relative to ucode_off
* @app_data_size: size of the app data
* @flags: flags for the secure bootloader
*
* This structure is written into the WPR region for each managed falcon. Each
* instance is referenced by the lsb_offset member of the corresponding
* lsf_wpr_header.
*/
struct acr_r352_lsf_lsb_header {
/**
* LS falcon signatures
* @prd_keys: signature to use in production mode
* @dbg_keys: signature to use in debug mode
* @b_prd_present: whether the production key is present
* @b_dbg_present: whether the debug key is present
* @falcon_id: ID of the falcon the ucode applies to
*/
struct {
u8 prd_keys[2][16];
u8 dbg_keys[2][16];
u32 b_prd_present;
u32 b_dbg_present;
u32 falcon_id;
} signature;
u32 ucode_off;
u32 ucode_size;
u32 data_size;
u32 bl_code_size;
u32 bl_imem_off;
u32 bl_data_off;
u32 bl_data_size;
u32 app_code_off;
u32 app_code_size;
u32 app_data_off;
u32 app_data_size;
u32 flags;
#define LSF_FLAG_LOAD_CODE_AT_0 1
#define LSF_FLAG_DMACTL_REQ_CTX 4
#define LSF_FLAG_FORCE_PRIV_LOAD 8
};
/**
* struct acr_r352_lsf_wpr_header - LS blob WPR Header
* @falcon_id: LS falcon ID
* @lsb_offset: offset of the lsb_lsf_header in the WPR region
* @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon
* @lazy_bootstrap: skip bootstrapping by ACR
* @status: bootstrapping status
*
* An array of these is written at the beginning of the WPR region, one for
* each managed falcon. The array is terminated by an instance whose falcon_id
* is LSF_FALCON_ID_INVALID.
*/
struct acr_r352_lsf_wpr_header {
u32 falcon_id;
u32 lsb_offset;
u32 bootstrap_owner;
u32 lazy_bootstrap;
u32 status;
#define LSF_IMAGE_STATUS_NONE 0
#define LSF_IMAGE_STATUS_COPY 1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
};
/**
* struct ls_ucode_img_r352 - ucode image augmented with r352 headers
*/
struct ls_ucode_img_r352 {
struct ls_ucode_img base;
struct acr_r352_lsf_wpr_header wpr_header;
struct acr_r352_lsf_lsb_header lsb_header;
};
#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
/*
* HS blob structures
*/
struct hsf_load_header_app {
u32 sec_code_off;
u32 sec_code_size;
};

/**
* struct hsf_load_header - HS firmware load header
*/
struct hsf_load_header {
u32 non_sec_code_off;
u32 non_sec_code_size;
u32 data_dma_base;
u32 data_size;
u32 num_apps;
struct hsf_load_header_app app[0];
};

static inline u32
hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app)
{
return hdr->apps[app];
}

static inline u32
hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
{
return hdr->apps[hdr->num_apps + app];
}
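The two accessors exist because the fixed hsf_load_header_app array was flattened into a single u32 apps[] tail: all code offsets come first, followed by all code sizes. A tiny self-contained illustration with made-up values:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
    /* Made-up layout for num_apps == 2: { off0, off1, size0, size1 } */
    uint32_t num_apps = 2;
    uint32_t apps[] = { 0x100, 0x900, 0x800, 0x200 };

    assert(apps[1] == 0x900);               /* app_off(1)  */
    assert(apps[num_apps + 1] == 0x200);    /* app_size(1) */
    return 0;
}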
/**
* struct acr_r352_ls_func - manages a single LS firmware
...@@ -157,6 +53,7 @@ struct hsf_load_header {
* @generate_bl_desc: function called on a block of bl_desc_size to generate the
* proper bootloader descriptor for this LS firmware
* @bl_desc_size: size of the bootloader descriptor
* @post_run: hook called right after the ACR is executed
* @lhdr_flags: LS flags
*/
struct acr_r352_ls_func {
...@@ -164,6 +61,7 @@ struct acr_r352_ls_func {
void (*generate_bl_desc)(const struct nvkm_acr *,
const struct ls_ucode_img *, u64, void *);
u32 bl_desc_size;
void (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
u32 lhdr_flags;
};

...@@ -179,13 +77,15 @@ struct acr_r352;
struct acr_r352_func {
void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *,
u64);
void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *);
u32 hs_bl_desc_size;
bool shadow_blob;
struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
enum nvkm_secboot_falcon);
int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
struct nvkm_gpuobj *, u64);
const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END];
};
};

...@@ -204,19 +104,22 @@ struct acr_r352 {
struct nvkm_gpuobj *load_blob;
struct {
struct hsf_load_header load_bl_header;
u32 __load_apps[ACR_R352_MAX_APPS * 2];
};

/* HS FW - unlock WPR region (dGPU only) */
struct nvkm_gpuobj *unload_blob;
struct {
struct hsf_load_header unload_bl_header;
u32 __unload_apps[ACR_R352_MAX_APPS * 2];
};

/* HS bootloader */
void *hsbl_blob;
/* HS bootloader for unload blob, if using a different falcon */
void *hsbl_unload_blob;

/* LS FWs, to be loaded by the HS ACR */
struct nvkm_gpuobj *ls_blob;
...@@ -245,6 +148,8 @@ struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
enum nvkm_secboot_falcon);
int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
struct nvkm_gpuobj *, u64);
void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
#endif
...@@ -20,58 +20,23 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_r361.h"

#include <engine/falcon.h>
#include <core/msgqueue.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
static void
acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
struct acr_r361_flcn_bl_desc *desc = _desc;
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
u64 base, addr_code, addr_data;

base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
...@@ -84,7 +49,7 @@ acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
desc->data_size = pdesc->app_resident_data_size;
}

void
acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
u64 offset)
{
...@@ -94,8 +59,8 @@ acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
bl_desc->code_dma_base = u64_to_flcn64(offset);
bl_desc->non_sec_code_off = hdr->non_sec_code_off;
bl_desc->non_sec_code_size = hdr->non_sec_code_size;
bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
bl_desc->code_entry_point = 0;
bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
bl_desc->data_size = hdr->data_size;
...@@ -117,8 +82,100 @@ acr_r361_ls_gpccs_func = {
.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
struct acr_r361_pmu_bl_desc {
u32 reserved;
u32 dma_idx;
struct flcn_u64 code_dma_base;
u32 total_code_size;
u32 code_size_to_load;
u32 code_entry_point;
struct flcn_u64 data_dma_base;
u32 data_size;
struct flcn_u64 overlay_dma_base;
u32 argc;
u32 argv;
};
static void
acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
struct acr_r361_pmu_bl_desc *desc = _desc;
u64 base, addr_code, addr_data;
u32 addr_args;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
addr_args = pmu->falcon->data.limit;
addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
desc->dma_idx = FALCON_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->total_code_size = pdesc->app_size;
desc->code_size_to_load = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
desc->overlay_dma_base = u64_to_flcn64(addr_code);
desc->argc = 1;
desc->argv = addr_args;
}
const struct acr_r352_ls_func
acr_r361_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
.generate_bl_desc = acr_r361_generate_pmu_bl_desc,
.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
.post_run = acr_ls_pmu_post_run,
};
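As in the r352 variant, argv points at the very top of the PMU's DMEM, where the host stages the msgqueue command line. A worked example with assumed numbers; the real values depend on the chip and on NVKM_MSGQUEUE_CMDLINE_SIZE:

#include <stdint.h>

/* Assumed numbers, for illustration only. */
static uint32_t
msgqueue_args_addr(void)
{
    uint32_t dmem_limit = 0x10000;   /* e.g. pmu->falcon->data.limit (64KiB) */
    uint32_t cmdline_size = 0x100;   /* e.g. NVKM_MSGQUEUE_CMDLINE_SIZE */

    return dmem_limit - cmdline_size;    /* argv = 0xff00 */
}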
static void
acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
struct acr_r361_pmu_bl_desc *desc = _desc;
u64 base, addr_code, addr_data;
u32 addr_args;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
/* For some reason we should not add app_resident_code_offset here */
addr_code = base;
addr_data = base + pdesc->app_resident_data_offset;
addr_args = sec->falcon->data.limit;
addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->total_code_size = pdesc->app_size;
desc->code_size_to_load = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
desc->overlay_dma_base = u64_to_flcn64(addr_code);
desc->argc = 1;
/* args are stored at the beginning of EMEM */
desc->argv = 0x01000000;
}
const struct acr_r352_ls_func
acr_r361_ls_sec2_func = {
.load = acr_ls_ucode_load_sec2,
.generate_bl_desc = acr_r361_generate_sec2_bl_desc,
.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
.post_run = acr_ls_sec2_post_run,
};
const struct acr_r352_func
acr_r361_func = {
.fixup_hs_desc = acr_r352_fixup_hs_desc,
.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
...@@ -127,6 +184,8 @@ acr_r361_func = {
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
[NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
},
};
...
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_ACR_R361_H__
#define __NVKM_SECBOOT_ACR_R361_H__
#include "acr_r352.h"
/**
* struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
* @signature: 16B signature for secure code. 0s if no secure code
* @ctx_dma: DMA context to be used by BL while loading code/data
* @code_dma_base: 256B-aligned Physical FB Address where code is located
* (falcon's $xcbase register)
* @non_sec_code_off: offset from code_dma_base where the non-secure code is
* located. The offset must be a multiple of 256 to help perf
* @non_sec_code_size: the size of the non-secure code part.
* @sec_code_off: offset from code_dma_base where the secure code is
* located. The offset must be a multiple of 256 to help perf
* @sec_code_size: the size of the secure code part.
* @code_entry_point: code entry point which will be invoked by BL after
* code is loaded.
* @data_dma_base: 256B aligned Physical FB Address where data is located.
* (falcon's $xdbase register)
* @data_size: size of data block. Should be a multiple of 256B
*
* Structure used by the bootloader to load the rest of the code. This has
* to be filled by the host and copied into DMEM at the offset provided in
* hsflcn_bl_desc.bl_desc_dmem_load_off.
*/
struct acr_r361_flcn_bl_desc {
u32 reserved[4];
u32 signature[4];
u32 ctx_dma;
struct flcn_u64 code_dma_base;
u32 non_sec_code_off;
u32 non_sec_code_size;
u32 sec_code_off;
u32 sec_code_size;
u32 code_entry_point;
struct flcn_u64 data_dma_base;
u32 data_size;
};
void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
extern const struct acr_r352_ls_func acr_r361_ls_sec2_func;
#endif
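The flcn_u64 type used throughout these descriptors comes from the falcon headers; its assumed shape, reproduced here purely for orientation, is a lo/hi word pair filled by u64_to_flcn64():

/* Assumed definitions (see <engine/falcon.h>); shown as a sketch only. */
struct flcn_u64 {
    u32 lo;
    u32 hi;
};

static inline struct flcn_u64
u64_to_flcn64(u64 u)
{
    struct flcn_u64 ret = {
        .lo = lower_32_bits(u),    /* low 32 bits of the DMA address */
        .hi = upper_32_bits(u),    /* high 32 bits */
    };
    return ret;
}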
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_r361.h"
#include <core/gpuobj.h>
/*
* r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem
* parameter.
*/
struct acr_r364_hsflcn_desc {
union {
u8 reserved_dmem[0x200];
u32 signatures[4];
} ucode_reserved_space;
u32 wpr_region_id;
u32 wpr_offset;
u32 mmu_memory_range;
struct {
u32 no_regions;
struct {
u32 start_addr;
u32 end_addr;
u32 region_id;
u32 read_mask;
u32 write_mask;
u32 client_mask;
u32 shadow_mem_start_addr;
} region_props[2];
} regions;
u32 ucode_blob_size;
u64 ucode_blob_base __aligned(8);
struct {
u32 vpr_enabled;
u32 vpr_start;
u32 vpr_end;
u32 hdcp_policies;
} vpr_desc;
};
static void
acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
void *_desc)
{
struct acr_r364_hsflcn_desc *desc = _desc;
struct nvkm_gpuobj *ls_blob = acr->ls_blob;
/* WPR region information if WPR is not fixed */
if (sb->wpr_size == 0) {
u64 wpr_start = ls_blob->addr;
u64 wpr_end = ls_blob->addr + ls_blob->size;
if (acr->func->shadow_blob)
wpr_start += ls_blob->size / 2;
desc->wpr_region_id = 1;
desc->regions.no_regions = 2;
desc->regions.region_props[0].start_addr = wpr_start >> 8;
desc->regions.region_props[0].end_addr = wpr_end >> 8;
desc->regions.region_props[0].region_id = 1;
desc->regions.region_props[0].read_mask = 0xf;
desc->regions.region_props[0].write_mask = 0xc;
desc->regions.region_props[0].client_mask = 0x2;
if (acr->func->shadow_blob)
desc->regions.region_props[0].shadow_mem_start_addr =
ls_blob->addr >> 8;
else
desc->regions.region_props[0].shadow_mem_start_addr = 0;
} else {
desc->ucode_blob_base = ls_blob->addr;
desc->ucode_blob_size = ls_blob->size;
}
}
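Note that start_addr/end_addr are programmed in 256-byte units, hence the >> 8. A quick worked example with an assumed blob placement:

#include <stdint.h>

/* Assumed placement, for illustration: LS blob at 0x2000000, 1MiB long. */
static void
region_addrs_example(uint32_t *start_addr, uint32_t *end_addr)
{
    uint64_t wpr_start = 0x2000000;
    uint64_t wpr_end = wpr_start + 0x100000;

    *start_addr = wpr_start >> 8;    /* 0x020000: 256-byte units */
    *end_addr = wpr_end >> 8;        /* 0x021000 */
}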
const struct acr_r352_func
acr_r364_func = {
.fixup_hs_desc = acr_r364_fixup_hs_desc,
.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
.ls_fill_headers = acr_r352_ls_fill_headers,
.ls_write_wpr = acr_r352_ls_write_wpr,
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
},
};
struct nvkm_acr *
acr_r364_new(unsigned long managed_falcons)
{
return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU,
managed_falcons);
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_r367.h"
#include "acr_r361.h"
#include <core/gpuobj.h>
/*
* r367 ACR: new LS signature format requires a rewrite of LS firmware and
* blob creation functions. Also the hsflcn_desc layout has changed slightly.
*/
#define LSF_LSB_DEPMAP_SIZE 11
/**
* struct acr_r367_lsf_lsb_header - LS firmware header
*
* See also struct acr_r352_lsf_lsb_header for documentation.
*/
struct acr_r367_lsf_lsb_header {
/**
* LS falcon signatures
* @prd_keys: signature to use in production mode
* @dbg_keys: signature to use in debug mode
* @b_prd_present: whether the production key is present
* @b_dbg_present: whether the debug key is present
* @falcon_id: ID of the falcon the ucode applies to
*/
struct {
u8 prd_keys[2][16];
u8 dbg_keys[2][16];
u32 b_prd_present;
u32 b_dbg_present;
u32 falcon_id;
u32 supports_versioning;
u32 version;
u32 depmap_count;
u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
u8 kdf[16];
} signature;
u32 ucode_off;
u32 ucode_size;
u32 data_size;
u32 bl_code_size;
u32 bl_imem_off;
u32 bl_data_off;
u32 bl_data_size;
u32 app_code_off;
u32 app_code_size;
u32 app_data_off;
u32 app_data_size;
u32 flags;
};
/**
* struct acr_r367_lsf_wpr_header - LS blob WPR Header
*
* See also struct acr_r352_lsf_wpr_header for documentation.
*/
struct acr_r367_lsf_wpr_header {
u32 falcon_id;
u32 lsb_offset;
u32 bootstrap_owner;
u32 lazy_bootstrap;
u32 bin_version;
u32 status;
#define LSF_IMAGE_STATUS_NONE 0
#define LSF_IMAGE_STATUS_COPY 1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7
};
/**
* struct ls_ucode_img_r367 - ucode image augmented with r367 headers
*/
struct ls_ucode_img_r367 {
struct ls_ucode_img base;
struct acr_r367_lsf_wpr_header wpr_header;
struct acr_r367_lsf_lsb_header lsb_header;
};
#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
struct ls_ucode_img *
acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
struct ls_ucode_img_r367 *img;
int ret;
img = kzalloc(sizeof(*img), GFP_KERNEL);
if (!img)
return ERR_PTR(-ENOMEM);
img->base.falcon_id = falcon_id;
ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
if (ret) {
kfree(img->base.ucode_data);
kfree(img->base.sig);
kfree(img);
return ERR_PTR(ret);
}
/* Check that the signature size matches our expectations... */
if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
nvkm_error(subdev, "invalid signature size for %s falcon!\n",
nvkm_secboot_falcon_name[falcon_id]);
return ERR_PTR(-EINVAL);
}
/* Copy signature to the right place */
memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
/* not needed? the signature should already have the right value */
img->lsb_header.signature.falcon_id = falcon_id;
return &img->base;
}
#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096
static u32
acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
struct ls_ucode_img_r367 *img, u32 offset)
{
struct ls_ucode_img *_img = &img->base;
struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
struct ls_ucode_img_desc *desc = &_img->ucode_desc;
const struct acr_r352_ls_func *func =
acr->func->ls_func[_img->falcon_id];
/* Fill WPR header */
whdr->falcon_id = _img->falcon_id;
whdr->bootstrap_owner = acr->base.boot_falcon;
whdr->bin_version = lhdr->signature.version;
whdr->status = LSF_IMAGE_STATUS_COPY;
/* Skip bootstrapping falcons started by someone other than the ACR */
if (acr->lazy_bootstrap & BIT(_img->falcon_id))
whdr->lazy_bootstrap = 1;
/* Align, save off, and include an LSB header size */
offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
whdr->lsb_offset = offset;
offset += sizeof(*lhdr);
/*
* Align, save off, and include the original (static) ucode
* image size
*/
offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
_img->ucode_off = lhdr->ucode_off = offset;
offset += _img->ucode_size;
/*
* For falcons that use a boot loader (BL), we append a loader
* desc structure on the end of the ucode image and consider
* this the boot loader data. The host will then copy the loader
* desc args to this space within the WPR region (before locking
* down) and the HS bin will then copy them to DMEM 0 for the
* loader.
*/
lhdr->bl_code_size = ALIGN(desc->bootloader_size,
LSF_BL_CODE_SIZE_ALIGN);
lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
lhdr->bl_code_size - lhdr->ucode_size;
/*
* Though the BL is located at offset 0 of the image, the VA
* is different to make sure that it doesn't collide with the
* actual OS VA range
*/
lhdr->bl_imem_off = desc->bootloader_imem_offset;
lhdr->app_code_off = desc->app_start_offset +
desc->app_resident_code_offset;
lhdr->app_code_size = desc->app_resident_code_size;
lhdr->app_data_off = desc->app_start_offset +
desc->app_resident_data_offset;
lhdr->app_data_size = desc->app_resident_data_size;
lhdr->flags = func->lhdr_flags;
if (_img->falcon_id == acr->base.boot_falcon)
lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
/* Align and save off BL descriptor size */
lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
/*
* Align, save off, and include the additional BL data
*/
offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
lhdr->bl_data_off = offset;
offset += lhdr->bl_data_size;
return offset;
}
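All the offsets above are rounded up with the kernel's ALIGN() macro; for reference, for power-of-two boundaries it behaves like the following restatement:

/* Reference restatement of ALIGN() for power-of-two alignments. */
#define ALIGN_POW2(x, a)    (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

/* e.g. ALIGN_POW2(0x341, 256) == 0x400, ALIGN_POW2(0x341, 4096) == 0x1000 */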
int
acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
{
struct ls_ucode_img_r367 *img;
struct list_head *l;
u32 count = 0;
u32 offset;
/* Count the number of images to manage */
list_for_each(l, imgs)
count++;
/*
* Start with an array of WPR headers at the base of the WPR.
* The expectation here is that the secure falcon will do a single DMA
* read of this array and cache it internally so it's ok to pack these.
* Also, we add 1 to the falcon count to indicate the end of the array.
*/
offset = sizeof(img->wpr_header) * (count + 1);
/*
* Walk the managed falcons, accounting for the LSB structs
* as well as the ucode images.
*/
list_for_each_entry(img, imgs, base.node) {
offset = acr_r367_ls_img_fill_headers(acr, img, offset);
}
return offset;
}
int
acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
struct ls_ucode_img *_img;
u32 pos = 0;
nvkm_kmap(wpr_blob);
list_for_each_entry(_img, imgs, node) {
struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
const struct acr_r352_ls_func *ls_func =
acr->func->ls_func[_img->falcon_id];
u8 gdesc[ls_func->bl_desc_size];
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
&img->lsb_header, sizeof(img->lsb_header));
/* Generate and write BL descriptor */
memset(gdesc, 0, ls_func->bl_desc_size);
ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
gdesc, ls_func->bl_desc_size);
/* Copy ucode */
nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
_img->ucode_data, _img->ucode_size);
pos += sizeof(img->wpr_header);
}
nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
nvkm_done(wpr_blob);
return 0;
}
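Taken together, fill_headers sizes the region and write_wpr populates it; the resulting blob looks roughly like this (a sketch, not to scale, with alignments taken from the LSF_*_ALIGN constants above):

/*
 * WPR blob layout (sketch):
 *
 *  0x0           wpr_header[0] .. wpr_header[count]
 *                (terminated by falcon_id == NVKM_SECBOOT_FALCON_INVALID)
 *  ALIGN(256)    lsb_header for falcon 0
 *  ALIGN(4096)   ucode image for falcon 0
 *  ALIGN(256)    BL descriptor for falcon 0
 *  ...           repeated for each managed falcon
 */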
struct acr_r367_hsflcn_desc {
u8 reserved_dmem[0x200];
u32 signatures[4];
u32 wpr_region_id;
u32 wpr_offset;
u32 mmu_memory_range;
#define FLCN_ACR_MAX_REGIONS 2
struct {
u32 no_regions;
struct {
u32 start_addr;
u32 end_addr;
u32 region_id;
u32 read_mask;
u32 write_mask;
u32 client_mask;
u32 shadow_mem_start_addr;
} region_props[FLCN_ACR_MAX_REGIONS];
} regions;
u32 ucode_blob_size;
u64 ucode_blob_base __aligned(8);
struct {
u32 vpr_enabled;
u32 vpr_start;
u32 vpr_end;
u32 hdcp_policies;
} vpr_desc;
};
void
acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
void *_desc)
{
struct acr_r367_hsflcn_desc *desc = _desc;
struct nvkm_gpuobj *ls_blob = acr->ls_blob;
/* WPR region information if WPR is not fixed */
if (sb->wpr_size == 0) {
u64 wpr_start = ls_blob->addr;
u64 wpr_end = ls_blob->addr + ls_blob->size;
if (acr->func->shadow_blob)
wpr_start += ls_blob->size / 2;
desc->wpr_region_id = 1;
desc->regions.no_regions = 2;
desc->regions.region_props[0].start_addr = wpr_start >> 8;
desc->regions.region_props[0].end_addr = wpr_end >> 8;
desc->regions.region_props[0].region_id = 1;
desc->regions.region_props[0].read_mask = 0xf;
desc->regions.region_props[0].write_mask = 0xc;
desc->regions.region_props[0].client_mask = 0x2;
if (acr->func->shadow_blob)
desc->regions.region_props[0].shadow_mem_start_addr =
ls_blob->addr >> 8;
else
desc->regions.region_props[0].shadow_mem_start_addr = 0;
} else {
desc->ucode_blob_base = ls_blob->addr;
desc->ucode_blob_size = ls_blob->size;
}
}
const struct acr_r352_func
acr_r367_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc,
.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
.shadow_blob = true,
.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
.ls_fill_headers = acr_r367_ls_fill_headers,
.ls_write_wpr = acr_r367_ls_write_wpr,
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
[NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
},
};
struct nvkm_acr *
acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
unsigned long managed_falcons)
{
return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
}
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_ACR_R367_H__
#define __NVKM_SECBOOT_ACR_R367_H__
#include "acr_r352.h"
void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
enum nvkm_secboot_falcon);
int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
struct nvkm_gpuobj *, u64);
#endif
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_r367.h"
#include <engine/falcon.h>
#include <core/msgqueue.h>
#include <subdev/pmu.h>
/*
* r375 ACR: similar to r367, but with a unified bootloader descriptor
* structure for GR and PMU falcons.
*/
/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
struct acr_r375_flcn_bl_desc {
u32 reserved[4];
u32 signature[4];
u32 ctx_dma;
struct flcn_u64 code_dma_base;
u32 non_sec_code_off;
u32 non_sec_code_size;
u32 sec_code_off;
u32 sec_code_size;
u32 code_entry_point;
struct flcn_u64 data_dma_base;
u32 data_size;
u32 argc;
u32 argv;
};
static void
acr_r375_generate_flcn_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
struct acr_r375_flcn_bl_desc *desc = _desc;
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
u64 base, addr_code, addr_data;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
desc->ctx_dma = FALCON_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->non_sec_code_off = pdesc->app_resident_code_offset;
desc->non_sec_code_size = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
}
static void
acr_r375_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
u64 offset)
{
struct acr_r375_flcn_bl_desc *bl_desc = _bl_desc;
bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
bl_desc->non_sec_code_off = hdr->non_sec_code_off;
bl_desc->non_sec_code_size = hdr->non_sec_code_size;
bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
bl_desc->code_entry_point = 0;
bl_desc->code_dma_base = u64_to_flcn64(offset);
bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
bl_desc->data_size = hdr->data_size;
}
const struct acr_r352_ls_func
acr_r375_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
.generate_bl_desc = acr_r375_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
};
const struct acr_r352_ls_func
acr_r375_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
.generate_bl_desc = acr_r375_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
/* GPCCS will be loaded using PRI */
.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
static void
acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
struct acr_r375_flcn_bl_desc *desc = _desc;
u64 base, addr_code, addr_data;
u32 addr_args;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
addr_args = pmu->falcon->data.limit;
addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
desc->ctx_dma = FALCON_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->non_sec_code_off = pdesc->app_resident_code_offset;
desc->non_sec_code_size = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
desc->argc = 1;
desc->argv = addr_args;
}
const struct acr_r352_ls_func
acr_r375_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
.generate_bl_desc = acr_r375_generate_pmu_bl_desc,
.bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
.post_run = acr_ls_pmu_post_run,
};
const struct acr_r352_func
acr_r375_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc,
.generate_hs_bl_desc = acr_r375_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
.shadow_blob = true,
.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
.ls_fill_headers = acr_r367_ls_fill_headers,
.ls_write_wpr = acr_r367_ls_write_wpr,
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r375_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r375_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
},
};
struct nvkm_acr *
acr_r375_new(enum nvkm_secboot_falcon boot_falcon,
unsigned long managed_falcons)
{
return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons);
}
...@@ -87,6 +87,7 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>

const char *
nvkm_secboot_falcon_name[] = {
...@@ -94,6 +95,7 @@ nvkm_secboot_falcon_name[] = {
[NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
[NVKM_SECBOOT_FALCON_FECS] = "FECS",
[NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
[NVKM_SECBOOT_FALCON_SEC2] = "SEC2",
[NVKM_SECBOOT_FALCON_END] = "<invalid>",
};
/**
...@@ -131,13 +133,20 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
switch (sb->acr->boot_falcon) {
case NVKM_SECBOOT_FALCON_PMU:
sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon;
break;
case NVKM_SECBOOT_FALCON_SEC2:
/* we must keep SEC2 alive forever since ACR will run on it */
nvkm_engine_ref(&subdev->device->sec2->engine);
sb->boot_falcon = subdev->device->sec2->falcon;
sb->halt_falcon = subdev->device->pmu->falcon;
break;
default:
nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
nvkm_secboot_falcon_name[sb->acr->boot_falcon]);
return -EINVAL;
}
nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name);

/* Call chip-specific init function */
if (sb->func->oneinit)
...
...@@ -34,12 +34,13 @@
*
*/
int
gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
struct nvkm_falcon *falcon)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
struct nvkm_subdev *subdev = &gsb->base.subdev;
struct nvkm_vma vma;
u32 start_address;
int ret;

ret = nvkm_falcon_get(falcon, subdev);
...@@ -60,10 +61,12 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob)
nvkm_falcon_bind_context(falcon, gsb->inst);

/* Load the HS bootloader into the falcon's IMEM/DMEM */
ret = sb->acr->func->load(sb->acr, falcon, blob, vma.offset);
if (ret < 0)
goto end;

start_address = ret;

/* Disable interrupts as we will poll for the HALT bit */
nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false);
...@@ -71,19 +74,17 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob)
nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);

/* Start the HS bootloader */
nvkm_falcon_set_start_addr(falcon, start_address);
nvkm_falcon_start(falcon);
ret = nvkm_falcon_wait_for_halt(falcon, 100);
if (ret)
goto end;

/*
* The mailbox register contains the (positive) error code - return this
* to the caller
*/
ret = nvkm_falcon_rd32(falcon, 0x040);

end:
/* Reenable interrupts */
...
...@@ -38,6 +38,7 @@ struct gm200_secboot {
int gm200_secboot_oneinit(struct nvkm_secboot *);
int gm200_secboot_fini(struct nvkm_secboot *, bool);
void *gm200_secboot_dtor(struct nvkm_secboot *);
int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
struct nvkm_falcon *);
#endif
...@@ -107,9 +107,12 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
struct gm200_secboot *gsb;
struct nvkm_acr *acr;

acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
BIT(NVKM_SECBOOT_FALCON_PMU));
if (IS_ERR(acr))
return PTR_ERR(acr);
/* Support the initial GM20B firmware release without PMU */
acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);

gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
...@@ -137,3 +140,6 @@ MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr.h"
#include "gm200.h"
#include "ls_ucode.h"
#include "hs_ucode.h"
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/falcon.h>
#include <engine/nvdec.h>
static bool
gp102_secboot_scrub_required(struct nvkm_secboot *sb)
{
struct nvkm_subdev *subdev = &sb->subdev;
struct nvkm_device *device = subdev->device;
u32 reg;
nvkm_wr32(device, 0x100cd0, 0x2);
reg = nvkm_rd32(device, 0x100cd0);
return (reg & BIT(4));
}
static int
gp102_run_secure_scrub(struct nvkm_secboot *sb)
{
struct nvkm_subdev *subdev = &sb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_engine *engine;
struct nvkm_falcon *falcon;
void *scrub_image;
struct fw_bin_header *hsbin_hdr;
struct hsf_fw_header *fw_hdr;
struct hsf_load_header *lhdr;
void *scrub_data;
int ret;
nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
engine = nvkm_engine_ref(&device->nvdec->engine);
if (IS_ERR(engine))
return PTR_ERR(engine);
falcon = device->nvdec->falcon;
nvkm_falcon_get(falcon, &sb->subdev);
scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber");
if (IS_ERR(scrub_image))
return PTR_ERR(scrub_image);
nvkm_falcon_reset(falcon);
nvkm_falcon_bind_context(falcon, NULL);
hsbin_hdr = scrub_image;
fw_hdr = scrub_image + hsbin_hdr->header_offset;
lhdr = scrub_image + fw_hdr->hdr_offset;
scrub_data = scrub_image + hsbin_hdr->data_offset;
nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
lhdr->non_sec_code_size,
lhdr->non_sec_code_off >> 8, 0, false);
nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
ALIGN(lhdr->apps[0], 0x100),
lhdr->apps[1],
lhdr->apps[0] >> 8, 0, true);
nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
lhdr->data_size, 0);
kfree(scrub_image);
nvkm_falcon_set_start_addr(falcon, 0x0);
nvkm_falcon_start(falcon);
ret = nvkm_falcon_wait_for_halt(falcon, 500);
if (ret < 0) {
nvkm_error(subdev, "failed to run VPR scrubber binary!\n");
ret = -ETIMEDOUT;
goto end;
}
/* put nvdec in clean state - without reset it will remain in HS mode */
nvkm_falcon_reset(falcon);
if (gp102_secboot_scrub_required(sb)) {
nvkm_error(subdev, "VPR scrubber binary failed!\n");
ret = -EINVAL;
goto end;
}
nvkm_debug(subdev, "VPR scrub successfully completed\n");
end:
nvkm_falcon_put(falcon, &sb->subdev);
nvkm_engine_unref(&engine);
return ret;
}
static int
gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
struct nvkm_falcon *falcon)
{
int ret;
/* make sure the VPR region is unlocked */
if (gp102_secboot_scrub_required(sb)) {
ret = gp102_run_secure_scrub(sb);
if (ret)
return ret;
}
return gm200_secboot_run_blob(sb, blob, falcon);
}
static const struct nvkm_secboot_func
gp102_secboot = {
.dtor = gm200_secboot_dtor,
.oneinit = gm200_secboot_oneinit,
.fini = gm200_secboot_fini,
.run_blob = gp102_secboot_run_blob,
};
int
gp102_secboot_new(struct nvkm_device *device, int index,
struct nvkm_secboot **psb)
{
int ret;
struct gm200_secboot *gsb;
struct nvkm_acr *acr;
acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
BIT(NVKM_SECBOOT_FALCON_FECS) |
BIT(NVKM_SECBOOT_FALCON_GPCCS) |
BIT(NVKM_SECBOOT_FALCON_SEC2));
if (IS_ERR(acr))
return PTR_ERR(acr);
gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
*psb = NULL;
return -ENOMEM;
}
*psb = &gsb->base;
ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
if (ret)
return ret;
return 0;
}
MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "hs_ucode.h"
#include "ls_ucode.h"
#include "acr.h"
#include <engine/falcon.h>
/**
* hs_ucode_patch_signature() - patch HS blob with correct signature for
* specified falcon.
*/
static void
hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image,
bool new_format)
{
struct fw_bin_header *hsbin_hdr = acr_image;
struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
void *hs_data = acr_image + hsbin_hdr->data_offset;
void *sig;
u32 sig_size;
u32 patch_loc, patch_sig;
/*
* I had the brilliant idea to "improve" the binary format by
* removing this useless indirection. However, to make NVIDIA files
* directly compatible, let's support both formats.
*/
if (new_format) {
patch_loc = fw_hdr->patch_loc;
patch_sig = fw_hdr->patch_sig;
} else {
patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc);
patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig);
}
/* Falcon in debug or production mode? */
if (falcon->debug) {
sig = acr_image + fw_hdr->sig_dbg_offset;
sig_size = fw_hdr->sig_dbg_size;
} else {
sig = acr_image + fw_hdr->sig_prod_offset;
sig_size = fw_hdr->sig_prod_size;
}
/* Patch signature */
memcpy(hs_data + patch_loc, sig + patch_sig, sig_size);
}
void *
hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon,
const char *fw)
{
void *acr_image;
bool new_format;
acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
if (IS_ERR(acr_image))
return acr_image;
/* detect the format to define how signature should be patched */
switch (((u32 *)acr_image)[0]) {
case 0x3b1d14f0:
new_format = true;
break;
case 0x000010de:
new_format = false;
break;
default:
nvkm_error(subdev, "unknown header for HS blob %s\n", fw);
return ERR_PTR(-EINVAL);
}
hs_ucode_patch_signature(falcon, acr_image, new_format);
return acr_image;
}
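Of the two magics matched above, 0x000010de is simply NVIDIA's PCI vendor ID, used by the legacy header, while 0x3b1d14f0 tags the reworked format. A condensed standalone restatement of the detection, for illustration only:

#include <stdbool.h>
#include <stdint.h>

/* Sketch mirroring the switch above: classify an HS blob by its first
 * word. *ok reports whether the magic was recognized at all. */
static bool
hs_blob_is_new_format(const void *image, bool *ok)
{
    uint32_t magic = ((const uint32_t *)image)[0];

    *ok = (magic == 0x3b1d14f0 || magic == 0x000010de);
    return magic == 0x3b1d14f0;    /* 0x000010de: legacy NVIDIA header */
}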
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_HS_UCODE_H__
#define __NVKM_SECBOOT_HS_UCODE_H__
#include <core/os.h>
#include <core/subdev.h>
struct nvkm_falcon;
/**
* struct hsf_fw_header - HS firmware descriptor
* @sig_dbg_offset: offset of the debug signature
* @sig_dbg_size: size of the debug signature
* @sig_prod_offset: offset of the production signature
* @sig_prod_size: size of the production signature
* @patch_loc: offset of the offset (sic) of where the signature is
* @patch_sig: offset of the offset (sic) to add to sig_*_offset
 * @hdr_offset: offset of the load header (see struct hsf_load_header)
* @hdr_size: size of above header
*
 * This structure is embedded in the HS firmware image at the offset given
 * by struct fw_bin_header's header_offset field.
*/
struct hsf_fw_header {
u32 sig_dbg_offset;
u32 sig_dbg_size;
u32 sig_prod_offset;
u32 sig_prod_size;
u32 patch_loc;
u32 patch_sig;
u32 hdr_offset;
u32 hdr_size;
};
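/*
 * Worked example (a sketch of the patching arithmetic, not part of the
 * original header): with the descriptor above, for the new binary format,
 *
 *	sig  = image + (debug falcon ? sig_dbg_offset : sig_prod_offset);
 *	size = debug falcon ? sig_dbg_size : sig_prod_size;
 *	memcpy(hs_data + patch_loc, sig + patch_sig, size);
 *
 * which is what hs_ucode_patch_signature() does; the old format reads
 * patch_loc and patch_sig through one extra level of indirection.
 */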
/**
* struct hsf_load_header - HS firmware load header
*/
struct hsf_load_header {
u32 non_sec_code_off;
u32 non_sec_code_size;
u32 data_dma_base;
u32 data_size;
u32 num_apps;
/*
* Organized as follows:
* - app0_code_off
* - app1_code_off
* - ...
* - appn_code_off
* - app0_code_size
* - app1_code_size
	 * - ...
	 * - appn_code_size
	 */
u32 apps[0];
};
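/*
 * Accessor sketch (hypothetical helpers for illustration, following the
 * apps[] layout described above): app n's code offset lives at apps[n]
 * and its size at apps[num_apps + n].
 */
static inline u32
hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app)
{
	return hdr->apps[app];
}

static inline u32
hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
{
	return hdr->apps[hdr->num_apps + app];
}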
void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *,
const char *);
#endif
@@ -27,6 +27,7 @@
 #include <core/subdev.h>
 #include <subdev/secboot.h>
 
+struct nvkm_acr;
 
 /**
  * struct ls_ucode_img_desc - descriptor of firmware image
@@ -83,6 +84,7 @@ struct ls_ucode_img_desc {
  * @ucode_desc: loaded or generated map of ucode_data
  * @ucode_data: firmware payload (code and data)
  * @ucode_size: size in bytes of data in ucode_data
+ * @ucode_off: offset of the ucode in ucode_data
  * @sig: signature for this firmware
  * @sig_size: size of the signature in bytes
  *
@@ -97,6 +99,7 @@ struct ls_ucode_img {
 	struct ls_ucode_img_desc ucode_desc;
 	u8 *ucode_data;
 	u32 ucode_size;
+	u32 ucode_off;
 	u8 *sig;
 	u32 sig_size;
@@ -146,6 +149,9 @@ struct fw_bl_desc {
 int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *);
 int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *);
+int acr_ls_ucode_load_pmu(const struct nvkm_subdev *, struct ls_ucode_img *);
+void acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
+int acr_ls_ucode_load_sec2(const struct nvkm_subdev *, struct ls_ucode_img *);
+void acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
 
 #endif
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "ls_ucode.h"
#include "acr.h"
#include <core/firmware.h>
#include <core/msgqueue.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
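/*
 * The firmware names requested below resolve through nvkm_firmware_get()
 * to nvidia/<chip>/<name>/{image,desc,sig}.bin, matching the
 * MODULE_FIRMWARE() entries declared for each supported chipset.
 */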
/**
* acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw
*
 * Load the LS microcode, descriptor and signature, and pack them into a
 * single blob.
*/
static int
acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
struct ls_ucode_img *img)
{
	const struct firmware *image, *desc, *sig;
	char f[64];
	int ret;

	snprintf(f, sizeof(f), "%s/image", name);
	ret = nvkm_firmware_get(subdev->device, f, &image);
	if (ret)
		return ret;
	img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
	nvkm_firmware_put(image);
	if (!img->ucode_data)
		return -ENOMEM;

	snprintf(f, sizeof(f), "%s/desc", name);
	ret = nvkm_firmware_get(subdev->device, f, &desc);
	if (ret)
		goto free_data;
	/* the descriptor is copied verbatim, so desc.bin must match the
	 * layout of struct ls_ucode_img_desc exactly */
	memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
	img->ucode_size = ALIGN(img->ucode_desc.app_start_offset +
				img->ucode_desc.app_size, 256);
	nvkm_firmware_put(desc);

	snprintf(f, sizeof(f), "%s/sig", name);
	ret = nvkm_firmware_get(subdev->device, f, &sig);
	if (ret)
		goto free_data;
	img->sig_size = sig->size;
	img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
	nvkm_firmware_put(sig);
	if (!img->sig) {
		ret = -ENOMEM;
		goto free_data;
	}

	return 0;

free_data:
	/* do not leak the ucode copy on the error paths above */
	kfree(img->ucode_data);
	img->ucode_data = NULL;
	return ret;
}
static void
acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
struct nvkm_falcon *falcon, u32 addr_args)
{
	u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE];

	memset(buf, 0, sizeof(buf));
	nvkm_msgqueue_write_cmdline(queue, buf);
	nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0);
/* rearm the queue so it will wait for the init message */
nvkm_msgqueue_reinit(queue);
}
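/*
 * The two post_run helpers below both reduce to the routine above; they
 * differ only in where the falcon expects its command line: the last bytes
 * of DMEM for the PMU, the start of EMEM for SEC2.
 */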
int
acr_ls_ucode_load_pmu(const struct nvkm_subdev *subdev,
struct ls_ucode_img *img)
{
struct nvkm_pmu *pmu = subdev->device->pmu;
int ret;
ret = acr_ls_ucode_load_msgqueue(subdev, "pmu", img);
if (ret)
return ret;
/* Allocate the PMU queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
&pmu->queue);
if (ret)
return ret;
return 0;
}
void
acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
{
struct nvkm_device *device = sb->subdev.device;
struct nvkm_pmu *pmu = device->pmu;
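	/* the command line goes in the last bytes of the PMU's DMEM */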
u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
}
int
acr_ls_ucode_load_sec2(const struct nvkm_subdev *subdev,
struct ls_ucode_img *img)
{
struct nvkm_sec2 *sec = subdev->device->sec2;
int ret;
ret = acr_ls_ucode_load_msgqueue(subdev, "sec2", img);
if (ret)
return ret;
	/* Allocate the SEC2 queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
&sec->queue);
if (ret)
return ret;
return 0;
}
void
acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
{
struct nvkm_device *device = sb->subdev.device;
struct nvkm_sec2 *sec = device->sec2;
	/* on SEC2, arguments are always at the beginning of EMEM */
u32 addr_args = 0x01000000;
acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
}
@@ -30,11 +30,10 @@ struct nvkm_secboot_func {
 	int (*oneinit)(struct nvkm_secboot *);
 	int (*fini)(struct nvkm_secboot *, bool suspend);
 	void *(*dtor)(struct nvkm_secboot *);
-	int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *);
+	int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *,
+			struct nvkm_falcon *);
 };
 
-extern const char *nvkm_secboot_falcon_name[];
-
 int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
 		      struct nvkm_device *, int, struct nvkm_secboot *);
 int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
@@ -82,7 +82,7 @@ gk104_top_oneinit(struct nvkm_top *top)
 		case 0x0000000a: A_(MSVLD ); break;
 		case 0x0000000b: A_(MSENC ); break;
 		case 0x0000000c: A_(VIC   ); break;
-		case 0x0000000d: A_(SEC   ); break;
+		case 0x0000000d: A_(SEC2  ); break;
 		case 0x0000000e: B_(NVENC ); break;
 		case 0x0000000f: A_(NVENC1); break;
 		case 0x00000010: A_(NVDEC ); break;