Commit 0180290a authored by Dave Airlie

Merge tag 'topic/nouveau-misc-2022-07-13-1' of git://anongit.freedesktop.org/drm/drm into drm-next

drm/nouveau next misc

This is a set of misc nouveau patches skeggsb left queued up, just
flushing some of them out.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Dave Airlie <airlied@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAPM=9txSS9Pdagpi=3JJeFOGy6ALWC31WZdQxLBkfGeL3O+T1A@mail.gmail.com
parents 1ebdc90e 89ed996b
......@@ -2623,14 +2623,6 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_encoder *encoder;
struct drm_plane *plane;
drm_for_each_plane(plane, dev) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (plane->funcs != &nv50_wndw)
continue;
nv50_wndw_fini(wndw);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
......@@ -2646,7 +2638,6 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
{
struct nv50_core *core = nv50_disp(dev)->core;
struct drm_encoder *encoder;
struct drm_plane *plane;
if (resume || runtime)
core->func->init(core);
......@@ -2659,13 +2650,6 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
}
}
drm_for_each_plane(plane, dev) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (plane->funcs != &nv50_wndw)
continue;
nv50_wndw_init(wndw);
}
return 0;
}
......
......@@ -694,18 +694,6 @@ nv50_wndw_notify(struct nvif_notify *notify)
return NVIF_NOTIFY_KEEP;
}
void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
nvif_notify_put(&wndw->notify);
}
void
nv50_wndw_init(struct nv50_wndw *wndw)
{
nvif_notify_get(&wndw->notify);
}
static const u64 nv50_cursor_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
......
......@@ -40,8 +40,6 @@ int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
enum drm_plane_type, const char *name, int index,
const u32 *format, enum nv50_disp_interlock_type,
u32 interlock_data, u32 heads, struct nv50_wndw **);
void nv50_wndw_init(struct nv50_wndw *);
void nv50_wndw_fini(struct nv50_wndw *);
void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock,
struct nv50_wndw_atom *);
void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush,
......
......@@ -22,6 +22,12 @@ struct nvif_object {
} map;
};
static inline bool
nvif_object_constructed(struct nvif_object *object)
{
return object->client != NULL;
}
int nvif_object_ctor(struct nvif_object *, const char *name, u32 handle,
s32 oclass, void *, u32, struct nvif_object *);
void nvif_object_dtor(struct nvif_object *);
......
......@@ -2,7 +2,6 @@
#ifndef __NVKM_DEVICE_H__
#define __NVKM_DEVICE_H__
#include <core/oclass.h>
#include <core/event.h>
enum nvkm_subdev_type;
enum nvkm_device_type {
......@@ -28,8 +27,6 @@ struct nvkm_device {
void __iomem *pri;
struct nvkm_event event;
u32 debug;
const struct nvkm_device_chip *chip;
......
......@@ -4,7 +4,6 @@
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
#include <core/engine.h>
struct nvkm_fifo_chan;
struct nvkm_gpuobj;
enum nvkm_falcon_dmaidx {
FALCON_DMAIDX_UCODE = 0,
......@@ -51,15 +50,6 @@ struct nvkm_falcon {
struct nvkm_engine engine;
};
/* This constructor must be called from the owner's oneinit() hook and
* *not* its constructor. This is to ensure that DEVINIT has been
* completed, and that the device is correctly enabled before we touch
* falcon registers.
*/
int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
struct nvkm_falcon **);
void nvkm_falcon_del(struct nvkm_falcon **);
int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
......
......@@ -26,7 +26,6 @@ struct nvkm_fifo_chan {
struct nvkm_gpuobj *inst;
struct nvkm_gpuobj *push;
struct nvkm_vmm *vmm;
void __iomem *user;
u64 addr;
u32 size;
......@@ -44,7 +43,6 @@ struct nvkm_fifo {
struct mutex mutex;
struct nvkm_event uevent; /* async user trigger */
struct nvkm_event cevent; /* channel creation event */
struct nvkm_event kevent; /* channel killed */
};
......
......@@ -2,7 +2,6 @@
#ifndef __NVKM_CLK_H__
#define __NVKM_CLK_H__
#include <core/subdev.h>
#include <core/notify.h>
#include <subdev/pci.h>
struct nvbios_pll;
struct nvkm_pll_vals;
......@@ -94,7 +93,6 @@ struct nvkm_clk {
wait_queue_head_t wait;
atomic_t waiting;
struct nvkm_notify pwrsrc_ntfy;
int pwrsrc;
int pstate; /* current */
int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
......@@ -124,6 +122,7 @@ int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
int nvkm_clk_pwrsrc(struct nvkm_device *);
int nv04_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
int nv40_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
......
#ifndef __NVKM_FAULT_H__
#define __NVKM_FAULT_H__
#include <core/subdev.h>
#include <core/event.h>
#include <core/notify.h>
struct nvkm_fault {
......
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECURE_BOOT_H__
#define __NVKM_SECURE_BOOT_H__
#include <core/subdev.h>
enum nvkm_secboot_falcon {
NVKM_SECBOOT_FALCON_PMU = 0,
NVKM_SECBOOT_FALCON_RESERVED = 1,
NVKM_SECBOOT_FALCON_FECS = 2,
NVKM_SECBOOT_FALCON_GPCCS = 3,
NVKM_SECBOOT_FALCON_SEC2 = 7,
NVKM_SECBOOT_FALCON_END = 8,
NVKM_SECBOOT_FALCON_INVALID = 0xffffffff,
};
extern const char *nvkm_secboot_falcon_name[];
/**
* @wpr_set: whether the WPR region is currently set
*/
struct nvkm_secboot {
const struct nvkm_secboot_func *func;
struct nvkm_acr *acr;
struct nvkm_subdev subdev;
struct nvkm_falcon *boot_falcon;
struct nvkm_falcon *halt_falcon;
u64 wpr_addr;
u32 wpr_size;
bool wpr_set;
};
#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon);
int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long);
int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp108_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
#endif
......@@ -126,9 +126,8 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{
struct nouveau_abi16_ntfy *ntfy, *temp;
/* wait for all activity to stop before releasing notify object, which
* may be still in use */
if (chan->chan && chan->ntfy)
/* wait for all activity to stop before cleaning up */
if (chan->chan)
nouveau_channel_idle(chan->chan);
/* cleanup notifier state */
......@@ -147,7 +146,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
/* destroy channel object, all children will be killed too */
if (chan->chan) {
nouveau_channel_idle(chan->chan);
nvif_object_dtor(&chan->ce);
nouveau_channel_del(&chan->chan);
}
......@@ -325,6 +324,31 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
init->nr_subchan = 2;
}
/* Workaround "nvc0" gallium driver using classes it doesn't allocate on
* Kepler and above. NVKM no longer always sets CE_CTX_VALID as part of
* channel init, now we know what that stuff actually is.
*
* Doesn't matter for Kepler/Pascal, CE context stored in NV_RAMIN.
*
* Userspace was fixed prior to adding Ampere support.
*/
switch (device->info.family) {
case NV_DEVICE_INFO_V0_VOLTA:
ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
NULL, 0, &chan->ce);
if (ret)
goto done;
break;
case NV_DEVICE_INFO_V0_TURING:
ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
NULL, 0, &chan->ce);
if (ret)
goto done;
break;
default:
break;
}
/* Named memory object area */
ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
......
......@@ -21,6 +21,7 @@ struct nouveau_abi16_ntfy {
struct nouveau_abi16_chan {
struct list_head head;
struct nouveau_channel *chan;
struct nvif_object ce;
struct list_head notifiers;
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
......
......@@ -385,7 +385,9 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nv_dma_v0 args = {};
int ret, i;
nvif_object_map(&chan->user, NULL, 0);
ret = nvif_object_map(&chan->user, NULL, 0);
if (ret)
return ret;
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO &&
chan->user.oclass < AMPERE_CHANNEL_GPFIFO_B) {
......
......@@ -102,7 +102,6 @@ struct nouveau_cli {
struct list_head head;
void *abi16;
struct list_head objects;
struct list_head notifys;
char name[32];
struct work_struct work;
......
......@@ -71,40 +71,11 @@ nvkm_client_suspend(void *priv)
return nvkm_object_fini(&client->object, true);
}
static int
nvkm_client_ntfy(const void *header, u32 length, const void *data, u32 size)
{
const union {
struct nvif_notify_req_v0 v0;
} *args = header;
u8 route;
if (length == sizeof(args->v0) && args->v0.version == 0) {
route = args->v0.route;
} else {
WARN_ON(1);
return NVKM_NOTIFY_DROP;
}
switch (route) {
case NVDRM_NOTIFY_NVIF:
return nvif_notify(header, length, data, size);
case NVDRM_NOTIFY_USIF:
return usif_notify(header, length, data, size);
default:
WARN_ON(1);
break;
}
return NVKM_NOTIFY_DROP;
}
static int
nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
const char *dbg, void **ppriv)
{
return nvkm_client_new(name, device, cfg, dbg, nvkm_client_ntfy,
(struct nvkm_client **)ppriv);
return nvkm_client_new(name, device, cfg, dbg, nvif_notify, (struct nvkm_client **)ppriv);
}
const struct nvif_driver
......
......@@ -26,232 +26,15 @@
#include "nouveau_usif.h"
#include "nouveau_abi16.h"
#include <nvif/notify.h>
#include <nvif/unpack.h>
#include <nvif/client.h>
#include <nvif/event.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0080.h>
struct usif_notify_p {
struct drm_pending_event base;
struct {
struct drm_event base;
u8 data[];
} e;
};
struct usif_notify {
struct list_head head;
atomic_t enabled;
u32 handle;
u16 reply;
u8 route;
u64 token;
struct usif_notify_p *p;
};
static inline struct usif_notify *
usif_notify_find(struct drm_file *filp, u32 handle)
{
struct nouveau_cli *cli = nouveau_cli(filp);
struct usif_notify *ntfy;
list_for_each_entry(ntfy, &cli->notifys, head) {
if (ntfy->handle == handle)
return ntfy;
}
return NULL;
}
static inline void
usif_notify_dtor(struct usif_notify *ntfy)
{
list_del(&ntfy->head);
kfree(ntfy);
}
int
usif_notify(const void *header, u32 length, const void *data, u32 size)
{
struct usif_notify *ntfy = NULL;
const union {
struct nvif_notify_rep_v0 v0;
} *rep = header;
struct drm_device *dev;
struct drm_file *filp;
unsigned long flags;
if (length == sizeof(rep->v0) && rep->v0.version == 0) {
if (WARN_ON(!(ntfy = (void *)(unsigned long)rep->v0.token)))
return NVIF_NOTIFY_DROP;
BUG_ON(rep->v0.route != NVDRM_NOTIFY_USIF);
} else
if (WARN_ON(1))
return NVIF_NOTIFY_DROP;
if (WARN_ON(!ntfy->p || ntfy->reply != (length + size)))
return NVIF_NOTIFY_DROP;
filp = ntfy->p->base.file_priv;
dev = filp->minor->dev;
memcpy(&ntfy->p->e.data[0], header, length);
memcpy(&ntfy->p->e.data[length], data, size);
switch (rep->v0.version) {
case 0: {
struct nvif_notify_rep_v0 *rep = (void *)ntfy->p->e.data;
rep->route = ntfy->route;
rep->token = ntfy->token;
}
break;
default:
BUG();
break;
}
spin_lock_irqsave(&dev->event_lock, flags);
if (!WARN_ON(filp->event_space < ntfy->p->e.base.length)) {
list_add_tail(&ntfy->p->base.link, &filp->event_list);
filp->event_space -= ntfy->p->e.base.length;
}
wake_up_interruptible(&filp->event_wait);
spin_unlock_irqrestore(&dev->event_lock, flags);
atomic_set(&ntfy->enabled, 0);
return NVIF_NOTIFY_DROP;
}
static int
usif_notify_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
{
struct nouveau_cli *cli = nouveau_cli(f);
struct nvif_client *client = &cli->base;
union {
struct nvif_ioctl_ntfy_new_v0 v0;
} *args = data;
union {
struct nvif_notify_req_v0 v0;
} *req;
struct usif_notify *ntfy;
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
if (usif_notify_find(f, args->v0.index))
return -EEXIST;
} else
return ret;
req = data;
ret = -ENOSYS;
if (!(ntfy = kmalloc(sizeof(*ntfy), GFP_KERNEL)))
return -ENOMEM;
atomic_set(&ntfy->enabled, 0);
if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, true))) {
ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply;
ntfy->route = req->v0.route;
ntfy->token = req->v0.token;
req->v0.route = NVDRM_NOTIFY_USIF;
req->v0.token = (unsigned long)(void *)ntfy;
ret = nvif_client_ioctl(client, argv, argc);
req->v0.token = ntfy->token;
req->v0.route = ntfy->route;
ntfy->handle = args->v0.index;
}
if (ret == 0)
list_add(&ntfy->head, &cli->notifys);
if (ret)
kfree(ntfy);
return ret;
}
static int
usif_notify_del(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
{
struct nouveau_cli *cli = nouveau_cli(f);
struct nvif_client *client = &cli->base;
union {
struct nvif_ioctl_ntfy_del_v0 v0;
} *args = data;
struct usif_notify *ntfy;
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
if (!(ntfy = usif_notify_find(f, args->v0.index)))
return -ENOENT;
} else
return ret;
ret = nvif_client_ioctl(client, argv, argc);
if (ret == 0)
usif_notify_dtor(ntfy);
return ret;
}
static int
usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
{
struct nouveau_cli *cli = nouveau_cli(f);
struct nvif_client *client = &cli->base;
union {
struct nvif_ioctl_ntfy_del_v0 v0;
} *args = data;
struct usif_notify *ntfy;
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
if (!(ntfy = usif_notify_find(f, args->v0.index)))
return -ENOENT;
} else
return ret;
if (atomic_xchg(&ntfy->enabled, 1))
return 0;
ntfy->p = kmalloc(sizeof(*ntfy->p) + ntfy->reply, GFP_KERNEL);
if (ret = -ENOMEM, !ntfy->p)
goto done;
ntfy->p->base.event = &ntfy->p->e.base;
ntfy->p->base.file_priv = f;
ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
ret = nvif_client_ioctl(client, argv, argc);
done:
if (ret) {
atomic_set(&ntfy->enabled, 0);
kfree(ntfy->p);
}
return ret;
}
static int
usif_notify_put(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
{
struct nouveau_cli *cli = nouveau_cli(f);
struct nvif_client *client = &cli->base;
union {
struct nvif_ioctl_ntfy_put_v0 v0;
} *args = data;
struct usif_notify *ntfy;
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
if (!(ntfy = usif_notify_find(f, args->v0.index)))
return -ENOENT;
} else
return ret;
ret = nvif_client_ioctl(client, argv, argc);
if (ret == 0 && atomic_xchg(&ntfy->enabled, 0))
kfree(ntfy->p);
return ret;
}
struct usif_object {
struct list_head head;
struct list_head ntfy;
u8 route;
u64 token;
};
......@@ -369,16 +152,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
ret = usif_object_new(filp, data, size, argv, argc, abi16);
break;
case NVIF_IOCTL_V0_NTFY_NEW:
ret = usif_notify_new(filp, data, size, argv, argc);
break;
case NVIF_IOCTL_V0_NTFY_DEL:
ret = usif_notify_del(filp, data, size, argv, argc);
break;
case NVIF_IOCTL_V0_NTFY_GET:
ret = usif_notify_get(filp, data, size, argv, argc);
break;
case NVIF_IOCTL_V0_NTFY_PUT:
ret = usif_notify_put(filp, data, size, argv, argc);
ret = -ENOSYS;
break;
default:
ret = nvif_client_ioctl(client, argv, argc);
......@@ -410,11 +187,6 @@ void
usif_client_fini(struct nouveau_cli *cli)
{
struct usif_object *object, *otemp;
struct usif_notify *notify, *ntemp;
list_for_each_entry_safe(notify, ntemp, &cli->notifys, head) {
usif_notify_dtor(notify);
}
list_for_each_entry_safe(object, otemp, &cli->objects, head) {
usif_object_dtor(object);
......@@ -425,5 +197,4 @@ void
usif_client_init(struct nouveau_cli *cli)
{
INIT_LIST_HEAD(&cli->objects);
INIT_LIST_HEAD(&cli->notifys);
}
......@@ -250,7 +250,7 @@ nvif_object_dtor(struct nvif_object *object)
.ioctl.type = NVIF_IOCTL_V0_DEL,
};
if (!object->client)
if (!nvif_object_constructed(object))
return;
nvif_object_unmap(object);
......
......@@ -24,6 +24,7 @@
#include <core/ioctl.h>
#include <core/client.h>
#include <core/engine.h>
#include <core/event.h>
#include <nvif/unpack.h>
#include <nvif/ioctl.h>
......@@ -128,7 +129,7 @@ nvkm_ioctl_new(struct nvkm_client *client,
if (ret == 0) {
ret = nvkm_object_init(object);
if (ret == 0) {
list_add(&object->head, &parent->tree);
list_add_tail(&object->head, &parent->tree);
if (nvkm_object_insert(object)) {
client->data = object;
return 0;
......
......@@ -21,11 +21,35 @@
*/
#include "priv.h"
#include <core/gpuobj.h>
#include <core/object.h>
#include <nvif/class.h>
static int
gv100_ce_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, int align,
struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_device *device = object->engine->subdev.device;
u32 size;
/* Allocate fault method buffer (magics come from nvgpu). */
size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
size = roundup(size, PAGE_SIZE);
return nvkm_gpuobj_new(device, size, align, true, parent, pgpuobj);
}
const struct nvkm_object_func
gv100_ce_cclass = {
.bind = gv100_ce_cclass_bind,
};
static const struct nvkm_engine_func
gv100_ce = {
.intr = gp100_ce_intr,
.cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, VOLTA_DMA_COPY_A },
{}
......
......@@ -6,4 +6,6 @@
void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
void gk104_ce_intr(struct nvkm_engine *);
void gp100_ce_intr(struct nvkm_engine *);
extern const struct nvkm_object_func gv100_ce_cclass;
#endif
......@@ -26,6 +26,7 @@
static const struct nvkm_engine_func
tu102_ce = {
.intr = gp100_ce_intr,
.cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, TURING_DMA_COPY_A },
{}
......
......@@ -24,17 +24,17 @@
#include "acpi.h"
#include <core/device.h>
#include <subdev/clk.h>
#ifdef CONFIG_ACPI
static int
nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
{
struct nvkm_device *device =
container_of(nb, typeof(*device), acpi.nb);
struct nvkm_device *device = container_of(nb, typeof(*device), acpi.nb);
struct acpi_bus_event *info = data;
if (!strcmp(info->device_class, "ac_adapter"))
nvkm_event_send(&device->event, 1, 0, NULL, 0);
nvkm_clk_pwrsrc(device);
return NOTIFY_DONE;
}
......
......@@ -24,7 +24,6 @@
#include "priv.h"
#include "acpi.h"
#include <core/notify.h>
#include <core/option.h>
#include <subdev/bios.h>
......@@ -2668,24 +2667,6 @@ nv177_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
};
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
{
if (!WARN_ON(size != 0)) {
notify->size = 0;
notify->types = 1;
notify->index = 0;
return 0;
}
return -EINVAL;
}
static const struct nvkm_event_func
nvkm_device_event_func = {
.ctor = nvkm_device_event_ctor,
};
struct nvkm_subdev *
nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
{
......@@ -2838,8 +2819,6 @@ nvkm_device_del(struct nvkm_device **pdevice)
list_for_each_entry_safe_reverse(subdev, subtmp, &device->subdev, head)
nvkm_subdev_del(&subdev);
nvkm_event_fini(&device->event);
if (device->pri)
iounmap(device->pri);
list_del(&device->head);
......@@ -2914,10 +2893,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
device->debug = nvkm_dbgopt(device->dbgopt, "device");
INIT_LIST_HEAD(&device->subdev);
ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
if (ret)
goto done;
mmio_base = device->func->resource_addr(device, 0);
mmio_size = device->func->resource_size(device, 0);
......
......@@ -346,6 +346,7 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
return -EINVAL;
oclass->base = sclass->base;
oclass->engine = NULL;
}
oclass->ctor = nvkm_udevice_child_new;
......
......@@ -144,30 +144,6 @@ nvkm_fifo_kevent_func = {
.ctor = nvkm_fifo_kevent_ctor,
};
static int
nvkm_fifo_cevent_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
{
if (size == 0) {
notify->size = 0;
notify->types = 1;
notify->index = 0;
return 0;
}
return -ENOSYS;
}
static const struct nvkm_event_func
nvkm_fifo_cevent_func = {
.ctor = nvkm_fifo_cevent_ctor,
};
void
nvkm_fifo_cevent(struct nvkm_fifo *fifo)
{
nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
}
static void
nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
......@@ -332,7 +308,6 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
if (fifo->func->dtor)
data = fifo->func->dtor(fifo);
nvkm_event_fini(&fifo->kevent);
nvkm_event_fini(&fifo->cevent);
nvkm_event_fini(&fifo->uevent);
mutex_destroy(&fifo->mutex);
return data;
......@@ -378,9 +353,5 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
return ret;
}
ret = nvkm_event_init(&nvkm_fifo_cevent_func, 1, 1, &fifo->cevent);
if (ret)
return ret;
return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent);
}
......@@ -271,36 +271,6 @@ nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
return 0;
}
static int
nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
if (unlikely(!chan->user)) {
chan->user = ioremap(chan->addr, chan->size);
if (!chan->user)
return -ENOMEM;
}
if (unlikely(addr + 4 > chan->size))
return -EINVAL;
*data = ioread32_native(chan->user + addr);
return 0;
}
static int
nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
if (unlikely(!chan->user)) {
chan->user = ioremap(chan->addr, chan->size);
if (!chan->user)
return -ENOMEM;
}
if (unlikely(addr + 4 > chan->size))
return -EINVAL;
iowrite32_native(data, chan->user + addr);
return 0;
}
static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
......@@ -332,9 +302,6 @@ nvkm_fifo_chan_dtor(struct nvkm_object *object)
}
spin_unlock_irqrestore(&fifo->lock, flags);
if (chan->user)
iounmap(chan->user);
if (chan->vmm) {
nvkm_vmm_part(chan->vmm, chan->inst->memory);
nvkm_vmm_unref(&chan->vmm);
......@@ -352,8 +319,6 @@ nvkm_fifo_chan_func = {
.fini = nvkm_fifo_chan_fini,
.ntfy = nvkm_fifo_chan_ntfy,
.map = nvkm_fifo_chan_map,
.rd32 = nvkm_fifo_chan_rd32,
.wr32 = nvkm_fifo_chan_wr32,
.sclass = nvkm_fifo_chan_child_get,
};
......@@ -424,7 +389,5 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
chan->addr = device->func->resource_addr(device, bar) +
base + user * chan->chid;
chan->size = user;
nvkm_fifo_cevent(fifo);
return 0;
}
......@@ -14,8 +14,6 @@ struct gk104_fifo_chan {
struct list_head head;
bool killed;
struct nvkm_memory *mthd;
#define GK104_FIFO_ENGN_SW 15
struct gk104_fifo_engn {
struct nvkm_gpuobj *inst;
......
......@@ -175,13 +175,19 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
int ret;
if (!gk104_fifo_gpfifo_engine_addr(engine))
if (!gk104_fifo_gpfifo_engine_addr(engine)) {
if (engine->subdev.type != NVKM_ENGINE_CE ||
engine->subdev.device->card_type < GV100)
return 0;
}
ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
if (ret)
return ret;
if (!gk104_fifo_gpfifo_engine_addr(engine))
return 0;
ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
if (ret)
return ret;
......@@ -231,7 +237,6 @@ void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
nvkm_memory_unref(&chan->mthd);
kfree(chan->cgrp);
return chan;
}
......
......@@ -70,8 +70,17 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
int ret;
if (engine->subdev.type == NVKM_ENGINE_CE)
return gk104_fifo_gpfifo_kick(chan);
if (engine->subdev.type == NVKM_ENGINE_CE) {
ret = gv100_fifo_gpfifo_engine_valid(chan, true, false);
if (ret && suspend)
return ret;
nvkm_kmap(inst);
nvkm_wo32(chan->base.inst, 0x220, 0x00000000);
nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
nvkm_done(inst);
return ret;
}
ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
if (ret && suspend)
......@@ -92,8 +101,16 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
struct nvkm_gpuobj *inst = chan->base.inst;
if (engine->subdev.type == NVKM_ENGINE_CE)
return 0;
if (engine->subdev.type == NVKM_ENGINE_CE) {
const u64 bar2 = nvkm_memory_bar2(engn->inst->memory);
nvkm_kmap(inst);
nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(bar2));
nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(bar2));
nvkm_done(inst);
return gv100_fifo_gpfifo_engine_valid(chan, true, true);
}
nvkm_kmap(inst);
nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
......@@ -123,11 +140,9 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
u32 *token, const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i;
u64 usermem, mthd;
u32 size;
u64 usermem;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL;
......@@ -173,20 +188,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
nvkm_done(fifo->user.mem);
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
/* Allocate fault method buffer (magics come from nvgpu). */
size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
size = roundup(size, PAGE_SIZE);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
&chan->mthd);
if (ret)
return ret;
mthd = nvkm_memory_bar2(chan->mthd);
if (mthd == ~0ULL)
return -EFAULT;
/* RAMFC */
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
......@@ -203,10 +204,8 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
nvkm_done(chan->base.inst);
return gv100_fifo_gpfifo_engine_valid(chan, true, true);
return 0;
}
int
......
......@@ -7,7 +7,6 @@
int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
int nr, struct nvkm_fifo *);
void nvkm_fifo_uevent(struct nvkm_fifo *);
void nvkm_fifo_cevent(struct nvkm_fifo *);
void nvkm_fifo_kevent(struct nvkm_fifo *, int chid);
void nvkm_fifo_recover_chan(struct nvkm_fifo *, int chid);
......
......@@ -26,7 +26,6 @@
#include <core/firmware.h>
#include <subdev/acr.h>
#include <subdev/secboot.h>
#include <nvfw/flcn.h>
......
......@@ -221,13 +221,3 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
mutex_init(&falcon->dmem_mutex);
return 0;
}
void
nvkm_falcon_del(struct nvkm_falcon **pfalcon)
{
if (*pfalcon) {
nvkm_falcon_dtor(*pfalcon);
kfree(*pfalcon);
*pfalcon = NULL;
}
}
......@@ -309,28 +309,3 @@ nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
nvkm_falcon_wr32(falcon, 0x014, 0xff);
falcon_v1_wait_idle(falcon);
}
static const struct nvkm_falcon_func
nvkm_falcon_v1 = {
.load_imem = nvkm_falcon_v1_load_imem,
.load_dmem = nvkm_falcon_v1_load_dmem,
.read_dmem = nvkm_falcon_v1_read_dmem,
.bind_context = nvkm_falcon_v1_bind_context,
.start = nvkm_falcon_v1_start,
.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
.enable = nvkm_falcon_v1_enable,
.disable = nvkm_falcon_v1_disable,
.set_start_addr = nvkm_falcon_v1_set_start_addr,
};
int
nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
struct nvkm_falcon **pfalcon)
{
struct nvkm_falcon *falcon;
if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
return -ENOMEM;
nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
return 0;
}
......@@ -330,7 +330,6 @@ nvkm_pstate_work(struct work_struct *work)
}
wake_up_all(&clk->wait);
nvkm_notify_get(&clk->pwrsrc_ntfy);
}
static int
......@@ -559,13 +558,12 @@ nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
return nvkm_pstate_calc(clk, true);
}
static int
nvkm_clk_pwrsrc(struct nvkm_notify *notify)
int
nvkm_clk_pwrsrc(struct nvkm_device *device)
{
struct nvkm_clk *clk =
container_of(notify, typeof(*clk), pwrsrc_ntfy);
nvkm_pstate_calc(clk, false);
return NVKM_NOTIFY_DROP;
if (device->clk)
return nvkm_pstate_calc(device->clk, false);
return 0;
}
/******************************************************************************
......@@ -582,7 +580,6 @@ static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_clk *clk = nvkm_clk(subdev);
nvkm_notify_put(&clk->pwrsrc_ntfy);
flush_work(&clk->work);
if (clk->func->fini)
clk->func->fini(clk);
......@@ -629,8 +626,6 @@ nvkm_clk_dtor(struct nvkm_subdev *subdev)
struct nvkm_clk *clk = nvkm_clk(subdev);
struct nvkm_pstate *pstate, *temp;
nvkm_notify_fini(&clk->pwrsrc_ntfy);
/* Early return if the pstates have been provided statically */
if (clk->func->pstates)
return clk;
......@@ -692,11 +687,6 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
clk->state_nr = func->nr_pstates;
}
ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
NULL, 0, 0, &clk->pwrsrc_ntfy);
if (ret)
return ret;
mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
if (mode) {
clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment