Commit c44c06ae authored by Ben Skeggs

drm/nouveau/imem: cosmetic changes

This is purely preparation for upcoming commits; there should be no
code changes here.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent ac51596f
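
Before the diff itself, a brief orientation may help: the renames below follow three mechanical patterns. The sketch is condensed from declarations that appear in the hunks; it is a reader's summary, not additional code from the commit.

/*
 * Summary sketch of the rename patterns in this commit (condensed from the
 * hunks below; not additional code).
 */

/* 1) The embedded subdev member of the common instmem structure is renamed. */
struct nvkm_instmem {
	struct nvkm_subdev subdev;	/* was: struct nvkm_subdev base; */
	struct list_head list;
	u32 reserved;
};

/* 2) The "_priv" suffix is dropped from the per-implementation structures,
 *    e.g. nv04_instmem_priv becomes nv04_instmem (likewise for nv50, gk20a,
 *    and the instobj structures).
 */
struct nv04_instmem {
	struct nvkm_instmem base;
	/* ... */
};

/* 3) Local pointers to these structures are renamed from "priv" to "imem",
 *    and accesses follow suit: imem->base.mutex becomes imem->subdev.mutex
 *    in the common code, priv->lock becomes imem->lock in the
 *    chipset-specific code.
 */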
@@ -21,7 +21,7 @@ nv_memobj(void *obj)
 }
 struct nvkm_instmem {
-	struct nvkm_subdev base;
+	struct nvkm_subdev subdev;
 	struct list_head list;
 	u32 reserved;
...
@@ -559,7 +559,7 @@ nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		struct nvkm_oclass *oclass, void *data, u32 size,
 		struct nvkm_object **pobject)
 {
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_instmem *imem = nv04_instmem(parent);
 	struct nv04_fifo_priv *priv;
 	int ret;
...
@@ -143,7 +143,7 @@ nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		struct nvkm_oclass *oclass, void *data, u32 size,
 		struct nvkm_object **pobject)
 {
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_instmem *imem = nv04_instmem(parent);
 	struct nv04_fifo_priv *priv;
 	int ret;
...
@@ -150,7 +150,7 @@ nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		struct nvkm_oclass *oclass, void *data, u32 size,
 		struct nvkm_object **pobject)
 {
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_instmem *imem = nv04_instmem(parent);
 	struct nv04_fifo_priv *priv;
 	int ret;
...
@@ -268,7 +268,7 @@ nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		struct nvkm_oclass *oclass, void *data, u32 size,
 		struct nvkm_object **pobject)
 {
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_instmem *imem = nv04_instmem(parent);
 	struct nv04_fifo_priv *priv;
 	int ret;
...
@@ -56,9 +56,9 @@ nvkm_instobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
-	mutex_lock(&imem->base.mutex);
+	mutex_lock(&imem->subdev.mutex);
 	list_add(&iobj->head, &imem->list);
-	mutex_unlock(&imem->base.mutex);
+	mutex_unlock(&imem->subdev.mutex);
 	return 0;
 }
@@ -70,7 +70,7 @@ static int
 nvkm_instmem_alloc(struct nvkm_instmem *imem, struct nvkm_object *parent,
 		   u32 size, u32 align, struct nvkm_object **pobject)
 {
-	struct nvkm_instmem_impl *impl = (void *)imem->base.object.oclass;
+	struct nvkm_instmem_impl *impl = (void *)imem->subdev.object.oclass;
 	struct nvkm_instobj_args args = { .size = size, .align = align };
 	return nvkm_object_ctor(parent, &parent->engine->subdev.object,
 				impl->instobj, &args, sizeof(args), pobject);
@@ -84,7 +84,7 @@ _nvkm_instmem_fini(struct nvkm_object *object, bool suspend)
 	int i, ret = 0;
 	if (suspend) {
-		mutex_lock(&imem->base.mutex);
+		mutex_lock(&imem->subdev.mutex);
 		list_for_each_entry(iobj, &imem->list, head) {
 			iobj->suspend = vmalloc(iobj->size);
 			if (!iobj->suspend) {
@@ -95,12 +95,12 @@ _nvkm_instmem_fini(struct nvkm_object *object, bool suspend)
 			for (i = 0; i < iobj->size; i += 4)
 				iobj->suspend[i / 4] = nv_ro32(iobj, i);
 		}
-		mutex_unlock(&imem->base.mutex);
+		mutex_unlock(&imem->subdev.mutex);
 		if (ret)
 			return ret;
 	}
-	return nvkm_subdev_fini(&imem->base, suspend);
+	return nvkm_subdev_fini(&imem->subdev, suspend);
 }
 int
@@ -110,11 +110,11 @@ _nvkm_instmem_init(struct nvkm_object *object)
 	struct nvkm_instobj *iobj;
 	int ret, i;
-	ret = nvkm_subdev_init(&imem->base);
+	ret = nvkm_subdev_init(&imem->subdev);
 	if (ret)
 		return ret;
-	mutex_lock(&imem->base.mutex);
+	mutex_lock(&imem->subdev.mutex);
 	list_for_each_entry(iobj, &imem->list, head) {
 		if (iobj->suspend) {
 			for (i = 0; i < iobj->size; i += 4)
@@ -123,7 +123,7 @@ _nvkm_instmem_init(struct nvkm_object *object)
 			iobj->suspend = NULL;
 		}
 	}
-	mutex_unlock(&imem->base.mutex);
+	mutex_unlock(&imem->subdev.mutex);
 	return 0;
 }
...
@@ -49,7 +49,7 @@
 #include "priv.h"
-struct gk20a_instobj_priv {
+struct gk20a_instobj {
 	struct nvkm_instobj base;
 	/* Must be second member here - see nouveau_gpuobj_map_vm() */
 	struct nvkm_mem *mem;
@@ -61,7 +61,7 @@ struct gk20a_instobj_priv {
  * Used for objects allocated using the DMA API
  */
 struct gk20a_instobj_dma {
-	struct gk20a_instobj_priv base;
+	struct gk20a_instobj base;
 	void *cpuaddr;
 	dma_addr_t handle;
@@ -72,13 +72,13 @@ struct gk20a_instobj_dma {
  * Used for objects flattened using the IOMMU API
  */
 struct gk20a_instobj_iommu {
-	struct gk20a_instobj_priv base;
+	struct gk20a_instobj base;
 	/* array of base.mem->size pages */
 	struct page *pages[];
 };
-struct gk20a_instmem_priv {
+struct gk20a_instmem {
 	struct nvkm_instmem base;
 	spinlock_t lock;
 	u64 addr;
@@ -105,60 +105,60 @@ struct gk20a_instmem_priv {
 static u32
 gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
 {
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct gk20a_instobj_priv *node = (void *)object;
+	struct gk20a_instmem *imem = (void *)nvkm_instmem(object);
+	struct gk20a_instobj *node = (void *)object;
 	unsigned long flags;
 	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
 	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
 	u32 data;
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	spin_lock_irqsave(&imem->lock, flags);
+	if (unlikely(imem->addr != base)) {
+		nv_wr32(imem, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	data = nv_rd32(priv, 0x700000 + addr);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	data = nv_rd32(imem, 0x700000 + addr);
+	spin_unlock_irqrestore(&imem->lock, flags);
 	return data;
 }
 static void
 gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
 {
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct gk20a_instobj_priv *node = (void *)object;
+	struct gk20a_instmem *imem = (void *)nvkm_instmem(object);
+	struct gk20a_instobj *node = (void *)object;
 	unsigned long flags;
 	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
 	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	spin_lock_irqsave(&imem->lock, flags);
+	if (unlikely(imem->addr != base)) {
+		nv_wr32(imem, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	nv_wr32(priv, 0x700000 + addr, data);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	nv_wr32(imem, 0x700000 + addr, data);
+	spin_unlock_irqrestore(&imem->lock, flags);
 }
 static void
-gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
 {
 	struct gk20a_instobj_dma *node = (void *)_node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
-	struct device *dev = nv_device_base(nv_device(priv));
+	struct gk20a_instmem *imem = (void *)nvkm_instmem(node);
+	struct device *dev = nv_device_base(nv_device(imem));
 	if (unlikely(!node->cpuaddr))
 		return;
 	dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
-		       node->handle, &priv->attrs);
+		       node->handle, &imem->attrs);
 }
 static void
-gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
 {
 	struct gk20a_instobj_iommu *node = (void *)_node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+	struct gk20a_instmem *imem = (void *)nvkm_instmem(node);
 	struct nvkm_mm_node *r;
 	int i;
@@ -169,28 +169,28 @@ gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
 			 rl_entry);
 	/* clear bit 34 to unmap pages */
-	r->offset &= ~BIT(34 - priv->iommu_pgshift);
+	r->offset &= ~BIT(34 - imem->iommu_pgshift);
 	/* Unmap pages from GPU address space and free them */
 	for (i = 0; i < _node->mem->size; i++) {
-		iommu_unmap(priv->domain,
-			    (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
+		iommu_unmap(imem->domain,
+			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
 		__free_page(node->pages[i]);
 	}
 	/* Release area from GPU address space */
-	mutex_lock(priv->mm_mutex);
-	nvkm_mm_free(priv->mm, &r);
-	mutex_unlock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
+	nvkm_mm_free(imem->mm, &r);
+	mutex_unlock(imem->mm_mutex);
 }
 static void
 gk20a_instobj_dtor(struct nvkm_object *object)
 {
-	struct gk20a_instobj_priv *node = (void *)object;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
-	if (priv->domain)
+	struct gk20a_instobj *node = (void *)object;
+	struct gk20a_instmem *imem = (void *)nvkm_instmem(node);
+	if (imem->domain)
 		gk20a_instobj_dtor_iommu(node);
 	else
 		gk20a_instobj_dtor_dma(node);
@@ -201,10 +201,10 @@ gk20a_instobj_dtor(struct nvkm_object *object)
 static int
 gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
 		       struct nvkm_oclass *oclass, u32 npages, u32 align,
-		       struct gk20a_instobj_priv **_node)
+		       struct gk20a_instobj **_node)
 {
 	struct gk20a_instobj_dma *node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+	struct gk20a_instmem *imem = (void *)nvkm_instmem(parent);
 	struct device *dev = nv_device_base(nv_device(parent));
 	int ret;
@@ -216,15 +216,15 @@ gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
 	node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
 					&node->handle, GFP_KERNEL,
-					&priv->attrs);
+					&imem->attrs);
 	if (!node->cpuaddr) {
-		nv_error(priv, "cannot allocate DMA memory\n");
+		nv_error(imem, "cannot allocate DMA memory\n");
 		return -ENOMEM;
 	}
 	/* alignment check */
 	if (unlikely(node->handle & (align - 1)))
-		nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
+		nv_warn(imem, "memory not aligned as requested: %pad (0x%x)\n",
 			&node->handle, align);
 	/* present memory for being mapped using small pages */
@@ -243,10 +243,10 @@ gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
 static int
 gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
 			 struct nvkm_oclass *oclass, u32 npages, u32 align,
-			 struct gk20a_instobj_priv **_node)
+			 struct gk20a_instobj **_node)
 {
 	struct gk20a_instobj_iommu *node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+	struct gk20a_instmem *imem = (void *)nvkm_instmem(parent);
 	struct nvkm_mm_node *r;
 	int ret;
 	int i;
@@ -269,38 +269,38 @@ gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
 		node->pages[i] = p;
 	}
-	mutex_lock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
 	/* Reserve area from GPU address space */
-	ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
-			   align >> priv->iommu_pgshift, &r);
-	mutex_unlock(priv->mm_mutex);
+	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
+			   align >> imem->iommu_pgshift, &r);
+	mutex_unlock(imem->mm_mutex);
 	if (ret) {
-		nv_error(priv, "virtual space is full!\n");
+		nv_error(imem, "virtual space is full!\n");
 		goto free_pages;
 	}
 	/* Map into GPU address space */
 	for (i = 0; i < npages; i++) {
 		struct page *p = node->pages[i];
-		u32 offset = (r->offset + i) << priv->iommu_pgshift;
-		ret = iommu_map(priv->domain, offset, page_to_phys(p),
+		u32 offset = (r->offset + i) << imem->iommu_pgshift;
+		ret = iommu_map(imem->domain, offset, page_to_phys(p),
 				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 		if (ret < 0) {
-			nv_error(priv, "IOMMU mapping failure: %d\n", ret);
+			nv_error(imem, "IOMMU mapping failure: %d\n", ret);
 			while (i-- > 0) {
 				offset -= PAGE_SIZE;
-				iommu_unmap(priv->domain, offset, PAGE_SIZE);
+				iommu_unmap(imem->domain, offset, PAGE_SIZE);
 			}
 			goto release_area;
 		}
 	}
 	/* Bit 34 tells that an address is to be resolved through the IOMMU */
-	r->offset |= BIT(34 - priv->iommu_pgshift);
-	node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;
+	r->offset |= BIT(34 - imem->iommu_pgshift);
+	node->base._mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
 	INIT_LIST_HEAD(&node->base._mem.regions);
 	list_add_tail(&r->rl_entry, &node->base._mem.regions);
@@ -308,9 +308,9 @@ gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
 	return 0;
 release_area:
-	mutex_lock(priv->mm_mutex);
-	nvkm_mm_free(priv->mm, &r);
-	mutex_unlock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
+	nvkm_mm_free(imem->mm, &r);
+	mutex_unlock(imem->mm_mutex);
 free_pages:
 	for (i = 0; i < npages && node->pages[i] != NULL; i++)
@@ -325,19 +325,19 @@ gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		   struct nvkm_object **pobject)
 {
 	struct nvkm_instobj_args *args = data;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct gk20a_instobj_priv *node;
+	struct gk20a_instmem *imem = (void *)nvkm_instmem(parent);
+	struct gk20a_instobj *node;
 	u32 size, align;
 	int ret;
 	nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
-		 priv->domain ? "IOMMU" : "DMA", args->size, args->align);
+		 imem->domain ? "IOMMU" : "DMA", args->size, args->align);
 	/* Round size and align to page bounds */
 	size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
 	align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);
-	if (priv->domain)
+	if (imem->domain)
 		ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
 					       size >> PAGE_SHIFT, align, &node);
 	else
@@ -380,9 +380,9 @@ gk20a_instobj_oclass = {
 static int
 gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
 {
-	struct gk20a_instmem_priv *priv = (void *)object;
-	priv->addr = ~0ULL;
-	return nvkm_instmem_fini(&priv->base, suspend);
+	struct gk20a_instmem *imem = (void *)object;
+	imem->addr = ~0ULL;
+	return nvkm_instmem_fini(&imem->base, suspend);
 }
 static int
@@ -390,37 +390,37 @@ gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		   struct nvkm_oclass *oclass, void *data, u32 size,
 		   struct nvkm_object **pobject)
 {
-	struct gk20a_instmem_priv *priv;
+	struct gk20a_instmem *imem;
 	struct nouveau_platform_device *plat;
 	int ret;
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_instmem_create(parent, engine, oclass, &imem);
+	*pobject = nv_object(imem);
 	if (ret)
 		return ret;
-	spin_lock_init(&priv->lock);
+	spin_lock_init(&imem->lock);
 	plat = nv_device_to_platform(nv_device(parent));
 	if (plat->gpu->iommu.domain) {
-		priv->domain = plat->gpu->iommu.domain;
-		priv->mm = plat->gpu->iommu.mm;
-		priv->iommu_pgshift = plat->gpu->iommu.pgshift;
-		priv->mm_mutex = &plat->gpu->iommu.mutex;
-		nv_info(priv, "using IOMMU\n");
+		imem->domain = plat->gpu->iommu.domain;
+		imem->mm = plat->gpu->iommu.mm;
+		imem->iommu_pgshift = plat->gpu->iommu.pgshift;
+		imem->mm_mutex = &plat->gpu->iommu.mutex;
+		nv_info(imem, "using IOMMU\n");
 	} else {
-		init_dma_attrs(&priv->attrs);
+		init_dma_attrs(&imem->attrs);
 		/*
 		 * We will access instmem through PRAMIN and thus do not need a
 		 * consistent CPU pointer or kernel mapping
 		 */
-		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
-		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
-		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
-		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);
-		nv_info(priv, "using DMA API\n");
+		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
+		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
+		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
+		nv_info(imem, "using DMA API\n");
 	}
 	return 0;
...
@@ -32,30 +32,27 @@
 static u32
 nv04_instobj_rd32(struct nvkm_object *object, u64 addr)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	return nv_ro32(priv, node->mem->offset + addr);
+	struct nv04_instmem *imem = (void *)nvkm_instmem(object);
+	struct nv04_instobj *node = (void *)object;
+	return nv_ro32(imem, node->mem->offset + addr);
 }
 static void
 nv04_instobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	nv_wo32(priv, node->mem->offset + addr, data);
+	struct nv04_instmem *imem = (void *)nvkm_instmem(object);
+	struct nv04_instobj *node = (void *)object;
+	nv_wo32(imem, node->mem->offset + addr, data);
 }
 static void
 nv04_instobj_dtor(struct nvkm_object *object)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	struct nvkm_subdev *subdev = (void *)priv;
-	mutex_lock(&subdev->mutex);
-	nvkm_mm_free(&priv->heap, &node->mem);
-	mutex_unlock(&subdev->mutex);
+	struct nv04_instmem *imem = (void *)nvkm_instmem(object);
+	struct nv04_instobj *node = (void *)object;
+	mutex_lock(&imem->base.subdev.mutex);
+	nvkm_mm_free(&imem->heap, &node->mem);
+	mutex_unlock(&imem->base.subdev.mutex);
 	nvkm_instobj_destroy(&node->base);
 }
@@ -64,10 +61,9 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		  struct nvkm_oclass *oclass, void *data, u32 size,
 		  struct nvkm_object **pobject)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct nv04_instobj_priv *node;
+	struct nv04_instmem *imem = (void *)nvkm_instmem(parent);
+	struct nv04_instobj *node;
 	struct nvkm_instobj_args *args = data;
-	struct nvkm_subdev *subdev = (void *)priv;
 	int ret;
 	if (!args->align)
@@ -78,10 +74,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
-	mutex_lock(&subdev->mutex);
-	ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
+	mutex_lock(&imem->base.subdev.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, args->size, args->size,
 			   args->align, &node->mem);
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&imem->base.subdev.mutex);
 	if (ret)
 		return ret;
@@ -121,15 +117,15 @@ nv04_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
 void
 nv04_instmem_dtor(struct nvkm_object *object)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->ramfc);
-	nvkm_gpuobj_ref(NULL, &priv->ramro);
-	nvkm_ramht_ref(NULL, &priv->ramht);
-	nvkm_gpuobj_ref(NULL, &priv->vbios);
-	nvkm_mm_fini(&priv->heap);
-	if (priv->iomem)
-		iounmap(priv->iomem);
-	nvkm_instmem_destroy(&priv->base);
+	struct nv04_instmem *imem = (void *)object;
+	nvkm_gpuobj_ref(NULL, &imem->ramfc);
+	nvkm_gpuobj_ref(NULL, &imem->ramro);
+	nvkm_ramht_ref(NULL, &imem->ramht);
+	nvkm_gpuobj_ref(NULL, &imem->vbios);
+	nvkm_mm_fini(&imem->heap);
+	if (imem->iomem)
+		iounmap(imem->iomem);
+	nvkm_instmem_destroy(&imem->base);
 }
 static int
@@ -137,41 +133,41 @@ nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		  struct nvkm_oclass *oclass, void *data, u32 size,
 		  struct nvkm_object **pobject)
 {
-	struct nv04_instmem_priv *priv;
+	struct nv04_instmem *imem;
 	int ret;
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_instmem_create(parent, engine, oclass, &imem);
+	*pobject = nv_object(imem);
 	if (ret)
 		return ret;
 	/* PRAMIN aperture maps over the end of VRAM, reserve it */
-	priv->base.reserved = 512 * 1024;
-	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	imem->base.reserved = 512 * 1024;
+	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
-			      &priv->vbios);
+	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x10000, 0, 0,
+			      &imem->vbios);
 	if (ret)
 		return ret;
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+	ret = nvkm_ramht_new(nv_object(imem), NULL, 0x08000, 0, &imem->ramht);
 	if (ret)
 		return ret;
 	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x00800, 0,
+			      NVOBJ_FLAG_ZERO_ALLOC, &imem->ramfc);
 	if (ret)
 		return ret;
 	/* 0x18800-0x18a00: reserve for RAMRO */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
-			      &priv->ramro);
+	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x00200, 0, 0,
+			      &imem->ramro);
 	if (ret)
 		return ret;
...
@@ -6,7 +6,7 @@
 extern struct nvkm_instobj_impl nv04_instobj_oclass;
-struct nv04_instmem_priv {
+struct nv04_instmem {
 	struct nvkm_instmem base;
 	void __iomem *iomem;
@@ -18,13 +18,13 @@ struct nv04_instmem_priv {
 	struct nvkm_gpuobj *ramfc;
 };
-static inline struct nv04_instmem_priv *
+static inline struct nv04_instmem *
 nv04_instmem(void *obj)
 {
 	return (void *)nvkm_instmem(obj);
 }
-struct nv04_instobj_priv {
+struct nv04_instobj {
 	struct nvkm_instobj base;
 	struct nvkm_mm_node *mem;
 };
...
@@ -33,15 +33,15 @@
 static u32
 nv40_instmem_rd32(struct nvkm_object *object, u64 addr)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	return ioread32_native(priv->iomem + addr);
+	struct nv04_instmem *imem = (void *)object;
+	return ioread32_native(imem->iomem + addr);
 }
 static void
 nv40_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	iowrite32_native(data, priv->iomem + addr);
+	struct nv04_instmem *imem = (void *)object;
+	iowrite32_native(data, imem->iomem + addr);
 }
 static int
@@ -50,11 +50,11 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		  struct nvkm_object **pobject)
 {
 	struct nvkm_device *device = nv_device(parent);
-	struct nv04_instmem_priv *priv;
+	struct nv04_instmem *imem;
 	int ret, bar, vs;
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_instmem_create(parent, engine, oclass, &imem);
+	*pobject = nv_object(imem);
 	if (ret)
 		return ret;
@@ -64,10 +64,10 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	else
 		bar = 3;
-	priv->iomem = ioremap(nv_device_resource_start(device, bar),
+	imem->iomem = ioremap(nv_device_resource_start(device, bar),
 			      nv_device_resource_len(device, bar));
-	if (!priv->iomem) {
-		nv_error(priv, "unable to map PRAMIN BAR\n");
+	if (!imem->iomem) {
+		nv_error(imem, "unable to map PRAMIN BAR\n");
 		return -EFAULT;
 	}
@@ -75,46 +75,46 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	 * to fit graphics contexts for every channel, the magics come
 	 * from engine/gr/nv40.c
 	 */
-	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
-	if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
-	else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs;
-	else if (nv44_gr_class(priv)) priv->base.reserved = 0x4980 * vs;
-	else priv->base.reserved = 0x4a40 * vs;
-	priv->base.reserved += 16 * 1024;
-	priv->base.reserved *= 32; /* per-channel */
-	priv->base.reserved += 512 * 1024; /* pci(e)gart table */
-	priv->base.reserved += 512 * 1024; /* object storage */
-	priv->base.reserved = round_up(priv->base.reserved, 4096);
-	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	vs = hweight8((nv_rd32(imem, 0x001540) & 0x0000ff00) >> 8);
+	if (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
+	else if (device->chipset < 0x43) imem->base.reserved = 0x4f00 * vs;
+	else if (nv44_gr_class(imem)) imem->base.reserved = 0x4980 * vs;
+	else imem->base.reserved = 0x4a40 * vs;
+	imem->base.reserved += 16 * 1024;
+	imem->base.reserved *= 32; /* per-channel */
+	imem->base.reserved += 512 * 1024; /* pci(e)gart table */
+	imem->base.reserved += 512 * 1024; /* object storage */
+	imem->base.reserved = round_up(imem->base.reserved, 4096);
+	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
-			      &priv->vbios);
+	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x10000, 0, 0,
+			      &imem->vbios);
 	if (ret)
 		return ret;
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+	ret = nvkm_ramht_new(nv_object(imem), NULL, 0x08000, 0, &imem->ramht);
 	if (ret)
 		return ret;
 	/* 0x18000-0x18200: reserve for RAMRO
 	 * 0x18200-0x20000: padding
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
-			      &priv->ramro);
+	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x08000, 0, 0,
+			      &imem->ramro);
 	if (ret)
 		return ret;
 	/* 0x20000-0x21000: reserve for RAMFC
 	 * 0x21000-0x40000: padding and some unknown crap
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x20000, 0,
+			      NVOBJ_FLAG_ZERO_ALLOC, &imem->ramfc);
 	if (ret)
 		return ret;
...
@@ -25,13 +25,13 @@
 #include <subdev/fb.h>
-struct nv50_instmem_priv {
+struct nv50_instmem {
 	struct nvkm_instmem base;
 	spinlock_t lock;
 	u64 addr;
 };
-struct nv50_instobj_priv {
+struct nv50_instobj {
 	struct nvkm_instobj base;
 	struct nvkm_mem *mem;
 };
@@ -43,45 +43,45 @@ struct nv50_instobj_priv {
 static u32
 nv50_instobj_rd32(struct nvkm_object *object, u64 offset)
 {
-	struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv50_instobj_priv *node = (void *)object;
+	struct nv50_instmem *imem = (void *)nvkm_instmem(object);
+	struct nv50_instobj *node = (void *)object;
 	unsigned long flags;
 	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
 	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
 	u32 data;
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	spin_lock_irqsave(&imem->lock, flags);
+	if (unlikely(imem->addr != base)) {
+		nv_wr32(imem, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	data = nv_rd32(priv, 0x700000 + addr);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	data = nv_rd32(imem, 0x700000 + addr);
+	spin_unlock_irqrestore(&imem->lock, flags);
 	return data;
 }
 static void
 nv50_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
 {
-	struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv50_instobj_priv *node = (void *)object;
+	struct nv50_instmem *imem = (void *)nvkm_instmem(object);
+	struct nv50_instobj *node = (void *)object;
 	unsigned long flags;
 	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
 	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	spin_lock_irqsave(&imem->lock, flags);
+	if (unlikely(imem->addr != base)) {
+		nv_wr32(imem, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	nv_wr32(priv, 0x700000 + addr, data);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	nv_wr32(imem, 0x700000 + addr, data);
+	spin_unlock_irqrestore(&imem->lock, flags);
 }
 static void
 nv50_instobj_dtor(struct nvkm_object *object)
 {
-	struct nv50_instobj_priv *node = (void *)object;
+	struct nv50_instobj *node = (void *)object;
 	struct nvkm_fb *fb = nvkm_fb(object);
 	fb->ram->put(fb, &node->mem);
 	nvkm_instobj_destroy(&node->base);
@@ -94,7 +94,7 @@ nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 {
 	struct nvkm_fb *fb = nvkm_fb(parent);
 	struct nvkm_instobj_args *args = data;
-	struct nv50_instobj_priv *node;
+	struct nv50_instobj *node;
 	int ret;
 	args->size = max((args->size + 4095) & ~4095, (u32)4096);
@@ -134,9 +134,9 @@ nv50_instobj_oclass = {
 static int
 nv50_instmem_fini(struct nvkm_object *object, bool suspend)
 {
-	struct nv50_instmem_priv *priv = (void *)object;
-	priv->addr = ~0ULL;
-	return nvkm_instmem_fini(&priv->base, suspend);
+	struct nv50_instmem *imem = (void *)object;
+	imem->addr = ~0ULL;
+	return nvkm_instmem_fini(&imem->base, suspend);
 }
 static int
@@ -144,15 +144,15 @@ nv50_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		  struct nvkm_oclass *oclass, void *data, u32 size,
 		  struct nvkm_object **pobject)
 {
-	struct nv50_instmem_priv *priv;
+	struct nv50_instmem *imem;
 	int ret;
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_instmem_create(parent, engine, oclass, &imem);
+	*pobject = nv_object(imem);
 	if (ret)
 		return ret;
-	spin_lock_init(&priv->lock);
+	spin_lock_init(&imem->lock);
 	return 0;
 }
...
@@ -36,14 +36,14 @@ struct nvkm_instmem_impl {
 #define nvkm_instmem_create(p,e,o,d) \
 	nvkm_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
 #define nvkm_instmem_destroy(p) \
-	nvkm_subdev_destroy(&(p)->base)
+	nvkm_subdev_destroy(&(p)->subdev)
 #define nvkm_instmem_init(p) ({ \
-	struct nvkm_instmem *imem = (p); \
-	_nvkm_instmem_init(nv_object(imem)); \
+	struct nvkm_instmem *_imem = (p); \
+	_nvkm_instmem_init(nv_object(_imem)); \
 })
 #define nvkm_instmem_fini(p,s) ({ \
-	struct nvkm_instmem *imem = (p); \
-	_nvkm_instmem_fini(nv_object(imem), (s)); \
+	struct nvkm_instmem *_imem = (p); \
+	_nvkm_instmem_fini(nv_object(_imem), (s)); \
 })
 int nvkm_instmem_create_(struct nvkm_object *, struct nvkm_object *,
...