Commit 358ce601 authored by Ben Skeggs

drm/nouveau/fifo: directly use instmem for runlists and polling areas

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent faf46898
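
The change drops the nvkm_gpuobj_map() helper and has the fifo engines allocate their runlists and user polling areas as plain instance memory, mapping the polling area through the BAR subdev directly. A minimal before/after sketch, condensed from the gf100 constructor and destructor in this diff (error handling and surrounding code omitted; `device` and `bar` are the locals the patch introduces):

	/* before: polling area was a gpuobj, mapped via the generic helper */
	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 0x1000, 0x1000, 0,
			      &fifo->user.mem);
	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW, &fifo->user.bar);

	/* after: allocate instmem directly and map it through the BAR subdev */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000, 0x1000,
			      false, &fifo->user.mem);
	ret = bar->umap(bar, 128 * 0x1000, 12, &fifo->user.bar);
	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);

	/* teardown changes to match (from the dtors below) */
	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);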
@@ -55,7 +55,6 @@ int nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
 		    u32 align, u32 flags, struct nvkm_gpuobj **);
 int nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_memory *,
 		    struct nvkm_gpuobj **);
-int nvkm_gpuobj_map(struct nvkm_gpuobj *, u32 acc, struct nvkm_vma *);
 int nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
 		       struct nvkm_vma *);
 void nvkm_gpuobj_unmap(struct nvkm_vma *);
...
@@ -263,22 +263,6 @@ nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
 			       (struct nvkm_object **)pgpuobj);
 }
 
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma)
-{
-	struct nvkm_memory *memory = gpuobj->memory;
-	struct nvkm_bar *bar = nvkm_bar(gpuobj);
-	int ret = -EINVAL;
-
-	if (bar && bar->umap) {
-		ret = bar->umap(bar, gpuobj->size, 12, vma);
-		if (ret == 0)
-			nvkm_memory_map(memory, vma, 0);
-	}
-
-	return ret;
-}
-
 int
 nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
 		   u32 access, struct nvkm_vma *vma)
...
@@ -455,6 +455,7 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	      struct nvkm_oclass *oclass, void *data, u32 size,
 	      struct nvkm_object **pobject)
 {
+	struct nvkm_device *device = (void *)parent;
 	struct nv50_fifo *fifo;
 	int ret;
@@ -463,13 +464,13 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0,
-			      &fifo->playlist[0]);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+			      false, &fifo->playlist[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0,
-			      &fifo->playlist[1]);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+			      false, &fifo->playlist[1]);
 	if (ret)
 		return ret;
...
@@ -27,6 +27,7 @@
 #include <core/engctx.h>
 #include <core/enum.h>
 #include <core/handle.h>
+#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
@@ -42,13 +43,13 @@ struct gf100_fifo {
 	u64 mask;
 
 	struct {
-		struct nvkm_gpuobj *mem[2];
+		struct nvkm_memory *mem[2];
 		int active;
 		wait_queue_head_t wait;
 	} runlist;
 
 	struct {
-		struct nvkm_gpuobj *mem;
+		struct nvkm_memory *mem;
 		struct nvkm_vma bar;
 	} user;
 	int spoon_nr;
@@ -78,7 +79,7 @@ gf100_fifo_runlist_update(struct gf100_fifo *fifo)
 {
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_gpuobj *cur;
+	struct nvkm_memory *cur;
 	int i, p;
 
 	mutex_lock(&nv_subdev(fifo)->mutex);
@@ -96,7 +97,7 @@ gf100_fifo_runlist_update(struct gf100_fifo *fifo)
 	}
 	nvkm_done(cur);
 
-	nvkm_wr32(device, 0x002270, cur->addr >> 12);
+	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
 	nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));
 
 	if (wait_event_timeout(fifo->runlist.wait,
@@ -238,10 +239,11 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	for (i = 0; i < 0x1000; i += 4)
 		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
 	nvkm_done(fifo->user.mem);
+	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
 
 	nvkm_kmap(ramfc);
-	nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
-	nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
+	nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
+	nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
 	nvkm_wo32(ramfc, 0x10, 0x0000face);
 	nvkm_wo32(ramfc, 0x30, 0xfffff902);
 	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
@@ -879,6 +881,8 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		struct nvkm_oclass *oclass, void *data, u32 size,
 		struct nvkm_object **pobject)
 {
+	struct nvkm_device *device = (void *)parent;
+	struct nvkm_bar *bar = device->bar;
 	struct gf100_fifo *fifo;
 	int ret;
@@ -889,28 +893,29 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
 
-	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0,
-			      &fifo->runlist.mem[0]);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+			      false, &fifo->runlist.mem[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0,
-			      &fifo->runlist.mem[1]);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+			      false, &fifo->runlist.mem[1]);
 	if (ret)
 		return ret;
 
 	init_waitqueue_head(&fifo->runlist.wait);
 
-	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 0x1000, 0x1000, 0,
-			      &fifo->user.mem);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
+			      0x1000, false, &fifo->user.mem);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
-			      &fifo->user.bar);
+	ret = bar->umap(bar, 128 * 0x1000, 12, &fifo->user.bar);
 	if (ret)
 		return ret;
 
+	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
+
 	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &fifo->base.uevent);
 	if (ret)
 		return ret;
@@ -927,10 +932,10 @@ gf100_fifo_dtor(struct nvkm_object *object)
 {
 	struct gf100_fifo *fifo = (void *)object;
 
-	nvkm_gpuobj_unmap(&fifo->user.bar);
-	nvkm_gpuobj_ref(NULL, &fifo->user.mem);
-	nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[0]);
-	nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[1]);
+	nvkm_vm_put(&fifo->user.bar);
+	nvkm_memory_del(&fifo->user.mem);
+	nvkm_memory_del(&fifo->runlist.mem[0]);
+	nvkm_memory_del(&fifo->runlist.mem[1]);
 
 	nvkm_fifo_destroy(&fifo->base);
 }
...
@@ -27,6 +27,7 @@
 #include <core/engctx.h>
 #include <core/enum.h>
 #include <core/handle.h>
+#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
@@ -53,7 +54,7 @@ static const struct {
 #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
 
 struct gk104_fifo_engn {
-	struct nvkm_gpuobj *runlist[2];
+	struct nvkm_memory *runlist[2];
 	int cur_runlist;
 	wait_queue_head_t wait;
 };
@@ -66,7 +67,7 @@ struct gk104_fifo {
 	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
 
 	struct {
-		struct nvkm_gpuobj *mem;
+		struct nvkm_memory *mem;
 		struct nvkm_vma bar;
 	} user;
 	int spoon_nr;
@@ -98,7 +99,7 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
 	struct gk104_fifo_engn *engn = &fifo->engine[engine];
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_gpuobj *cur;
+	struct nvkm_memory *cur;
 	int i, p;
 
 	mutex_lock(&nv_subdev(fifo)->mutex);
@@ -116,7 +117,7 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
 	}
 	nvkm_done(cur);
 
-	nvkm_wr32(device, 0x002270, cur->addr >> 12);
+	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
 	nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));
 
 	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
@@ -296,10 +297,11 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	for (i = 0; i < 0x200; i += 4)
 		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
 	nvkm_done(fifo->user.mem);
+	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
 
 	nvkm_kmap(ramfc);
-	nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
-	nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
+	nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
+	nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
 	nvkm_wo32(ramfc, 0x10, 0x0000face);
 	nvkm_wo32(ramfc, 0x30, 0xfffff902);
 	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
@@ -1107,12 +1109,12 @@ gk104_fifo_dtor(struct nvkm_object *object)
 	struct gk104_fifo *fifo = (void *)object;
 	int i;
 
-	nvkm_gpuobj_unmap(&fifo->user.bar);
-	nvkm_gpuobj_ref(NULL, &fifo->user.mem);
+	nvkm_vm_put(&fifo->user.bar);
+	nvkm_memory_del(&fifo->user.mem);
 
 	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]);
-		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]);
+		nvkm_memory_del(&fifo->engine[i].runlist[1]);
+		nvkm_memory_del(&fifo->engine[i].runlist[0]);
 	}
 
 	nvkm_fifo_destroy(&fifo->base);
@@ -1123,6 +1125,8 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		struct nvkm_oclass *oclass, void *data, u32 size,
 		struct nvkm_object **pobject)
 {
+	struct nvkm_device *device = (void *)parent;
+	struct nvkm_bar *bar = device->bar;
 	struct gk104_fifo_impl *impl = (void *)oclass;
 	struct gk104_fifo *fifo;
 	int ret, i;
@@ -1136,29 +1140,33 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
 
 	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
-				      0, &fifo->engine[i].runlist[0]);
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x8000, 0x1000, false,
+				      &fifo->engine[i].runlist[0]);
 		if (ret)
 			return ret;
 
-		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
-				      0, &fifo->engine[i].runlist[1]);
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x8000, 0x1000, false,
+				      &fifo->engine[i].runlist[1]);
 		if (ret)
 			return ret;
 
 		init_waitqueue_head(&fifo->engine[i].wait);
 	}
 
-	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      impl->channels * 0x200, 0x1000,
+			      true, &fifo->user.mem);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
-			      &fifo->user.bar);
+	ret = bar->umap(bar, impl->channels * 0x200, 12, &fifo->user.bar);
 	if (ret)
 		return ret;
 
+	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
+
 	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
 	if (ret)
 		return ret;
...
@@ -41,7 +41,7 @@ static void
 nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	struct nvkm_gpuobj *cur;
+	struct nvkm_memory *cur;
 	int i, p;
 
 	cur = fifo->playlist[fifo->cur_playlist];
@@ -54,7 +54,7 @@ nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
 	}
 	nvkm_done(cur);
 
-	nvkm_wr32(device, 0x0032f4, cur->addr >> 12);
+	nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
 	nvkm_wr32(device, 0x0032ec, p);
 	nvkm_wr32(device, 0x002500, 0x00000101);
 }
@@ -467,6 +467,7 @@ nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
+	struct nvkm_device *device = (void *)parent;
 	struct nv50_fifo *fifo;
 	int ret;
@@ -475,13 +476,13 @@ nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0,
-			      &fifo->playlist[0]);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+			      false, &fifo->playlist[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0,
-			      &fifo->playlist[1]);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+			      false, &fifo->playlist[1]);
 	if (ret)
 		return ret;
@@ -499,8 +500,8 @@ nv50_fifo_dtor(struct nvkm_object *object)
 {
 	struct nv50_fifo *fifo = (void *)object;
 
-	nvkm_gpuobj_ref(NULL, &fifo->playlist[1]);
-	nvkm_gpuobj_ref(NULL, &fifo->playlist[0]);
+	nvkm_memory_del(&fifo->playlist[1]);
+	nvkm_memory_del(&fifo->playlist[0]);
 
 	nvkm_fifo_destroy(&fifo->base);
 }
...
@@ -4,7 +4,7 @@
 struct nv50_fifo {
 	struct nvkm_fifo base;
-	struct nvkm_gpuobj *playlist[2];
+	struct nvkm_memory *playlist[2];
 	int cur_playlist;
 };
...