Commit f80c2fb6 authored by Jani Nikula

Merge tag 'gvt-fixes-2017-01-16' of https://github.com/01org/gvt-linux into drm-intel-fixes

gvt-fixes-2017-01-16

vGPU reset fixes from Changbin.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parents e4621b73 c34eaa8d
@@ -158,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
POSTING_READ(fence_reg_lo);
}
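/* Callers of the fence helpers below bracket them with
 * intel_runtime_pm_get()/intel_runtime_pm_put(), since
 * intel_vgpu_write_fence() writes the hardware fence registers.
 */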
static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
{
int i;
for (i = 0; i < vgpu_fence_sz(vgpu); i++)
intel_vgpu_write_fence(vgpu, i, 0);
}
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -171,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
intel_vgpu_write_fence(vgpu, i, 0);
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
@@ -201,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
continue;
list_del(pos);
vgpu->fence.regs[i] = reg;
intel_vgpu_write_fence(vgpu, i, 0);
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;
_clear_vgpu_fence(vgpu);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -306,6 +315,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
free_resource(vgpu);
}
/**
* intel_vgpu_reset_resource - reset resource state owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to reset resource state owned by a vGPU.
*
*/
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
intel_runtime_pm_get(dev_priv);
_clear_vgpu_fence(vgpu);
intel_runtime_pm_put(dev_priv);
}
/**
* intel_alloc_vgpu_resource - allocate HW resource for a vGPU
* @vgpu: vGPU
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
}
return 0;
}
/**
* intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU
*
* @vgpu: a vGPU
* @primary: is the vGPU presented as primary
*
*/
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
bool primary)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
int i;
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);
if (!primary) {
vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
}
/* Show guest that there isn't any stolen memory. */
gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
gvt_aperture_pa_base(gvt), true);
vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER);
/*
* Clear the upper 32 bits of the BARs and let the guest assign the new values.
*/
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
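/* The i915 BARs are 64 bit wide, so vGPU BAR i corresponds to
 * physical PCI resource i * 2: each 64-bit BAR occupies two
 * consecutive 32-bit BAR slots.
 */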
for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
vgpu->cfg_space.bar[i].size = pci_resource_len(
gvt->dev_priv->drm.pdev, i * 2);
vgpu->cfg_space.bar[i].tracked = false;
}
}
/**
* intel_vgpu_reset_cfg_space - reset vGPU configuration space
*
* @vgpu: a vGPU
*
*/
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
{
u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
INTEL_GVT_PCI_CLASS_VGA_OTHER;
if (cmd & PCI_COMMAND_MEMORY) {
trap_gttmmio(vgpu, false);
map_aperture(vgpu, false);
}
/*
* Currently we only do such a reset when the vGPU is not
* owned by any VM, so we simply restore the entire cfg
* space to its default values.
*/
intel_vgpu_init_cfg_space(vgpu, primary);
}
@@ -2277,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
for (offset = 0; offset < num_entries; offset++)
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
}
/**
* intel_vgpu_reset_gtt - reset all GTT related state
* @vgpu: a vGPU
* @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
*
* This function is called from the vfio core to reset all
* GTT related state, including GGTT, PPGTT and scratch pages.
*
*/
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
{
int i;
ppgtt_free_all_shadow_page(vgpu);
if (!dmlr)
return;
intel_vgpu_reset_ggtt(vgpu);
/* clear scratch page for security */
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
if (vgpu->gtt.scratch_pt[i].page != NULL)
memset(page_address(vgpu->gtt.scratch_pt[i].page),
0, PAGE_SIZE);
}
}
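An illustrative sketch (not part of the patch) of the two call modes described in the kernel-doc above:

/* GT reset: drop the shadow PPGTT pages only. */
intel_vgpu_reset_gtt(vgpu, false);

/* Device Model Level Reset: also invalidate the vGPU's GGTT entries
 * and scrub the scratch pages.
 */
intel_vgpu_reset_gtt(vgpu, true);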
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index);
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
struct intel_gvt_ops {
@@ -231,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
return 0;
}
static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes, unsigned long bitmap)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
vgpu->resetting = true;
intel_vgpu_stop_schedule(vgpu);
/*
* The current_vgpu will be set to NULL after stopping the
* scheduler when the reset is triggered by the current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
mutex_unlock(&vgpu->gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&vgpu->gvt->lock);
}
intel_vgpu_reset_execlist(vgpu, bitmap);
/* full GPU reset */
if (bitmap == 0xff) {
mutex_unlock(&vgpu->gvt->lock);
intel_vgpu_clean_gtt(vgpu);
mutex_lock(&vgpu->gvt->lock);
setup_vgpu_mmio(vgpu);
populate_pvinfo_page(vgpu);
intel_vgpu_init_gtt(vgpu);
}
vgpu->resetting = false;
return 0;
}
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
unsigned int engine_mask = 0;
u32 data;
u64 bitmap = 0;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & GEN6_GRDOM_FULL) {
gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
bitmap = 0xff;
}
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
bitmap |= (1 << RCS);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
bitmap |= (1 << VCS);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
bitmap |= (1 << BCS);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
bitmap |= (1 << VECS);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
if (HAS_BSD2(vgpu->gvt->dev_priv))
bitmap |= (1 << VCS2);
engine_mask = ALL_ENGINES;
} else {
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
engine_mask |= (1 << RCS);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
engine_mask |= (1 << VCS);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
engine_mask |= (1 << BCS);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
engine_mask |= (1 << VECS);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
if (HAS_BSD2(vgpu->gvt->dev_priv))
engine_mask |= (1 << VCS2);
}
}
return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
return 0;
}
static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -303,3 +303,56 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mutex_unlock(&gvt->lock);
return ret;
}
/**
* intel_vgpu_reset_mmio - reset virtual MMIO space
* @vgpu: a vGPU
*
*/
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
/* set bits 0:2 (Core C-State) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
}
/**
* intel_vgpu_init_mmio - init MMIO space
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
if (!vgpu->mmio.vreg)
return -ENOMEM;
vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
intel_vgpu_reset_mmio(vgpu);
return 0;
}
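The vreg/sreg blocks share a single allocation; a sketch of the layout intel_vgpu_init_mmio() sets up (assuming sreg is the shadow copy of the virtual registers, per the vreg/sreg naming):

/*
 * One vzalloc'd buffer of info->mmio_size * 2 bytes:
 *
 *   vgpu->mmio.vreg --> [ virtual MMIO registers ] (mmio_size bytes)
 *   vgpu->mmio.sreg --> [ shadow MMIO registers  ] (mmio_size bytes)
 *
 * Both halves are seeded from gvt->firmware.mmio on reset, and
 * intel_vgpu_clean_mmio() frees the whole buffer through vreg.
 */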
/**
* intel_vgpu_clean_mmio - clean MMIO space
* @vgpu: a vGPU
*
*/
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
*offset; \
})
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
@@ -35,79 +35,6 @@
#include "gvt.h"
#include "i915_pvinfo.h"
static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
if (vgpu->mmio.vreg)
memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
else {
vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
if (!vgpu->mmio.vreg)
return -ENOMEM;
}
vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
/* set bits 0:2 (Core C-State) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
return 0;
}
static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
int i;
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);
if (!param->primary) {
vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
}
/* Show guest that there isn't any stolen memory. */
gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
gvt_aperture_pa_base(gvt), true);
vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER);
/*
* Clear the upper 32 bits of the BARs and let the guest assign the new values.
*/
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
vgpu->cfg_space.bar[i].size = pci_resource_len(
gvt->dev_priv->drm.pdev, i * 2);
vgpu->cfg_space.bar[i].tracked = false;
}
}
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
clean_vgpu_mmio(vgpu);
intel_vgpu_clean_mmio(vgpu);
vfree(vgpu);
intel_gvt_update_vgpu_types(gvt);
@@ -300,9 +227,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->gvt = gvt;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
setup_vgpu_cfg_space(vgpu, param);
intel_vgpu_init_cfg_space(vgpu, param->primary);
ret = setup_vgpu_mmio(vgpu);
ret = intel_vgpu_init_mmio(vgpu);
if (ret)
goto out_clean_idr;
@@ -354,7 +281,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
clean_vgpu_mmio(vgpu);
intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
@@ -400,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
}
/**
* intel_gvt_reset_vgpu - reset a virtual GPU
* intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
* @vgpu: virtual GPU
* @dmlr: vGPU Device Model Level Reset or GT Reset
* @engine_mask: engines to reset for GT reset
*
* This function is called when the user wants to reset a virtual GPU through
* a device model reset or a GT reset. The caller should hold the gvt lock.
*
* vGPU Device Model Level Reset (DMLR) simulates a PCI level reset to bring
* the whole vGPU back to the default state it had when it was created. This
* vGPU function is required for both functional and security concerns. The
* ultimate goal of vGPU FLR is to allow a vGPU instance to be reused by
* virtual machines: when we assign a vGPU to a virtual machine, we must
* issue such a reset first.
*
* Full GT Reset and Per-Engine GT Reset are soft reset flows for the GPU
* engines (Render, Blitter, Video, Video Enhancement), as defined by the
* GPU spec. Unlike the FLR, a GT reset only resets particular resources of
* a vGPU per the reset request. The guest driver can issue a GT reset by
* programming the virtual GDRST register to reset specific virtual GPU
* engines or all engines.
*
* The parameter dmlr identifies whether we will do a DMLR or a GT reset.
* The parameter engine_mask specifies the engines that need to be reset.
* If ALL_ENGINES is given for engine_mask, the caller requests a full GT
* reset in which all virtual GPU engines are reset. For FLR, engine_mask
* is ignored.
*/
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
vgpu->id, dmlr, engine_mask);
vgpu->resetting = true;
intel_vgpu_stop_schedule(vgpu);
/*
* The current_vgpu will be set to NULL after stopping the
* scheduler when the reset is triggered by the current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
mutex_unlock(&gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&gvt->lock);
}
intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
intel_vgpu_reset_gtt(vgpu, dmlr);
intel_vgpu_reset_resource(vgpu);
intel_vgpu_reset_mmio(vgpu);
populate_pvinfo_page(vgpu);
if (dmlr)
intel_vgpu_reset_cfg_space(vgpu);
}
vgpu->resetting = false;
gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
gvt_dbg_core("------------------------------------------\n");
}
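For illustration, the two call patterns this series establishes (mirroring gdrst_mmio_write() above and intel_gvt_reset_vgpu() below):

/* GT reset from the virtual GDRST handler; gvt->lock is already held: */
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

/* Device Model Level Reset, e.g. before reassigning the vGPU to a VM: */
mutex_lock(&vgpu->gvt->lock);
intel_gvt_reset_vgpu_locked(vgpu, true, 0);
mutex_unlock(&vgpu->gvt->lock);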
/**
* intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
* @vgpu: virtual GPU
*
* This function is called when the user wants to reset a virtual GPU.
@@ -408,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
mutex_lock(&vgpu->gvt->lock);
intel_gvt_reset_vgpu_locked(vgpu, true, 0);
mutex_unlock(&vgpu->gvt->lock);
}