Commit db8feb69 authored by Dave Airlie

Merge tag 'drm-intel-next-2016-11-08' of git://anongit.freedesktop.org/git/drm-intel into drm-next

- gpu idling rework for s/r (Imre)
- vlv mappable scanout fix
- speed up probing in resume (Lyude)
- dp audio workarounds for gen9 (Dhinakaran)
- more conversion to using dev_priv internally (Ville)
- more gen9+ wm fixes and cleanups (Maarten)
- shrinker cleanup & fixes (Chris)
- reorg plane init code (Ville)
- implement support for multiple timelines (prep work for the scheduler)
  from Chris et al.
- untangle dev->struct_mutex locking as prep for multiple timelines
  (Chris)
- refactor bxt phy code and collect it all in intel_dpio_phy.c (Ander)
- another gvt update with bugfixes all over from Zhenyu
- piles of lspcon fixes from Imre
- 90/270 rotation fixes (Ville)
- guc log buffer support (Akash+Sagar)
- fbc fixes from Paulo
- untangle rpm vs. tiling-fences/mmaps (Chris)
- fix atomic commit to wait on the right fences (Daniel Stone)

* tag 'drm-intel-next-2016-11-08' of git://anongit.freedesktop.org/git/drm-intel: (181 commits)
  drm/i915: Update DRIVER_DATE to 20161108
  drm/i915: Mark CPU cache as dirty when used for rendering
  drm/i915: Add assert for no pending GPU requests during suspend/resume in LR mode
  drm/i915: Make sure engines are idle during GPU idling in LR mode
  drm/i915: Avoid early GPU idling due to race with new request
  drm/i915: Avoid early GPU idling due to already pending idle work
  drm/i915: Limit Valleyview and earlier to only using mappable scanout
  drm/i915: Round tile chunks up for constructing partial VMAs
  drm/i915: Remove the vma from the object list upon close
  drm/i915: Reinit polling before hpd when resuming
  drm/i915: Remove redundant reprobe in i915_drm_resume
  drm/i915/dp: Extend BDW DP audio workaround to GEN9 platforms
  drm/i915/dp: BDW cdclk fix for DP audio
  drm/i915: Fix pages pin counting around swizzle quirk
  drm/i915: Fix test on inputs for vma_compare()
  drm/i915/guc: Cache the client mapping
  drm/i915: Tidy slab cache allocations
  drm/i915: Introduce HAS_64BIT_RELOC
  drm/i915: Show the execlist queue in debugfs/i915_engine_info
  drm/i915: Unify global_list into global_link
  ...
parents afdd548f 58e197d6
@@ -189,7 +189,7 @@ Display Refresh Rate Switching (DRRS)
  DPIO
  ----
- .. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
+ .. kernel-doc:: drivers/gpu/drm/i915/intel_dpio_phy.c
     :doc: DPIO
  CSR firmware support for DMC
......
@@ -4060,7 +4060,6 @@ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
  M: Daniel Vetter <daniel.vetter@intel.com>
  M: Jani Nikula <jani.nikula@linux.intel.com>
  L: intel-gfx@lists.freedesktop.org
- L: dri-devel@lists.freedesktop.org
  W: https://01.org/linuxgraphics/
  Q: http://patchwork.freedesktop.org/project/intel-gfx/
  T: git git://anongit.freedesktop.org/drm-intel
......
@@ -11,6 +11,7 @@ config DRM_I915
  select DRM_KMS_HELPER
  select DRM_PANEL
  select DRM_MIPI_DSI
+ select RELAY
  # i915 depends on ACPI_VIDEO when ACPI is enabled
  # but for select to work, need to select ACPI_VIDEO's dependencies, ick
  select BACKLIGHT_LCD_SUPPORT if ACPI

@@ -24,16 +25,17 @@ config DRM_I915
  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
- If M is selected, the module will be called i915. AGP support
- is required for this driver to work. This driver is used by
- the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
- replaces the older i830 module that supported a subset of the
- hardware in older X.org releases.
+ This driver is used by the Intel driver in X.org 6.8 and
+ XFree86 4.4 and above. It replaces the older i830 module that
+ supported a subset of the hardware in older X.org releases.
  Note that the older i810/i815 chipsets require the use of the
  i810 driver instead, and the Atom z5xx series has an entirely
  different implementation.
+ If "M" is selected, the module will be called i915.

@@ -85,6 +87,7 @@ config DRM_I915_USERPTR
  config DRM_I915_GVT
  bool "Enable Intel GVT-g graphics virtualization host support"
  depends on DRM_I915
+ depends on 64BIT
  default n
  help
  Choose this option if you want to enable Intel GVT-g graphics
......
@@ -35,16 +35,19 @@ i915-y += i915_cmd_parser.o \
  i915_gem_execbuffer.o \
  i915_gem_fence.o \
  i915_gem_gtt.o \
+ i915_gem_internal.o \
  i915_gem.o \
  i915_gem_render_state.o \
  i915_gem_request.o \
  i915_gem_shrinker.o \
  i915_gem_stolen.o \
  i915_gem_tiling.o \
+ i915_gem_timeline.o \
  i915_gem_userptr.o \
  i915_trace_points.o \
  intel_breadcrumbs.o \
  intel_engine_cs.o \
+ intel_hangcheck.o \
  intel_lrc.o \
  intel_mocs.o \
  intel_ringbuffer.o \
......
@@ -1145,7 +1145,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
  info->event = PRIMARY_B_FLIP_DONE;
  break;
  case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
- info->pipe = PIPE_B;
+ info->pipe = PIPE_C;
  info->event = PRIMARY_C_FLIP_DONE;
  break;
  default:

@@ -1201,20 +1201,19 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
  struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
  struct intel_vgpu *vgpu = s->vgpu;
- #define write_bits(reg, e, s, v) do { \
-     vgpu_vreg(vgpu, reg) &= ~GENMASK(e, s); \
-     vgpu_vreg(vgpu, reg) |= (v << s); \
- } while (0)
- write_bits(info->surf_reg, 31, 12, info->surf_val);
- if (IS_SKYLAKE(dev_priv))
-     write_bits(info->stride_reg, 9, 0, info->stride_val);
- else
-     write_bits(info->stride_reg, 15, 6, info->stride_val);
- write_bits(info->ctrl_reg, IS_SKYLAKE(dev_priv) ? 12 : 10,
-            10, info->tile_val);
- #undef write_bits
+ set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+               info->surf_val << 12);
+ if (IS_SKYLAKE(dev_priv)) {
+     set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+                   info->stride_val);
+     set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+                   info->tile_val << 10);
+ } else {
+     set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+                   info->stride_val << 6);
+     set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+                   info->tile_val << 10);
+ }
  vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
  intel_vgpu_trigger_virtual_event(vgpu, info->event);
......
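The hunk above replaces the local write_bits() macro with the kernel's generic set_mask_bits() helper from <linux/bitops.h>. A hedged, minimal sketch of the equivalence (illustrative only, not part of the patch; set_mask_bits() and GENMASK() are real kernel helpers, the wrapper function is hypothetical):

    #include <linux/bitops.h>

    /*
     * set_mask_bits(ptr, mask, bits) atomically does roughly:
     *     *ptr = (*ptr & ~mask) | bits;
     * so the old write_bits(reg, 31, 12, val) pattern becomes
     * set_mask_bits(&reg, GENMASK(31, 12), val << 12) - the caller now
     * shifts the new value into position itself.
     */
    static inline void update_surf_field(u32 *vreg, u32 surf_val)
    {
            set_mask_bits(vreg, GENMASK(31, 12), surf_val << 12);
    }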
@@ -276,7 +276,7 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
  pte = readq(addr);
  #else
  pte = ioread32(addr);
- pte |= ioread32(addr + 4) << 32;
+ pte |= (u64)ioread32(addr + 4) << 32;
  #endif
  return pte;
  }

@@ -1944,7 +1944,7 @@ static int create_scratch_page(struct intel_vgpu *vgpu)
  mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
  if (mfn == INTEL_GVT_INVALID_ADDR) {
-     gvt_err("fail to translate vaddr:0x%llx\n", (u64)vaddr);
+     gvt_err("fail to translate vaddr: 0x%p\n", vaddr);
      __free_page(gtt->scratch_page);
      gtt->scratch_page = NULL;
      return -ENXIO;
......
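The one-character fix to read_pte64() above matters because ioread32() returns a 32-bit value: shifting it left by 32 without first widening it discards the high half. A hedged, standalone sketch of the corrected pattern (the function name is illustrative, not from the driver):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Compose a 64-bit PTE from two 32-bit MMIO reads when readq() is not
     * available. The (u64) cast widens the high dword *before* the shift.
     */
    static u64 read_qword_split(void __iomem *addr)
    {
            u64 pte;

            pte = ioread32(addr);                   /* low 32 bits */
            pte |= (u64)ioread32(addr + 4) << 32;   /* high 32 bits */
            return pte;
    }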
@@ -65,6 +65,8 @@ struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
   */
  int intel_gvt_init_host(void)
  {
+     int ret;
  if (intel_gvt_host.initialized)
      return 0;

@@ -90,7 +92,8 @@ int intel_gvt_init_host(void)
  return -EINVAL;
  /* Try to detect if we're running in host instead of VM. */
- if (!intel_gvt_hypervisor_detect_host())
+ ret = intel_gvt_hypervisor_detect_host();
+ if (ret)
      return -ENODEV;
  gvt_dbg_core("Running with hypervisor %s in host mode\n",

@@ -103,19 +106,20 @@ int intel_gvt_init_host(void)
  static void init_device_info(struct intel_gvt *gvt)
  {
      struct intel_gvt_device_info *info = &gvt->device_info;
+     struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
      if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
          info->max_support_vgpus = 8;
          info->cfg_space_size = 256;
          info->mmio_size = 2 * 1024 * 1024;
          info->mmio_bar = 0;
-         info->msi_cap_offset = IS_SKYLAKE(gvt->dev_priv) ? 0xac : 0x90;
          info->gtt_start_offset = 8 * 1024 * 1024;
          info->gtt_entry_size = 8;
          info->gtt_entry_size_shift = 3;
          info->gmadr_bytes_in_cmd = 8;
          info->max_surface_size = 36 * 1024 * 1024;
      }
+     info->msi_cap_offset = pdev->msi_cap;
  }
  static int gvt_service_thread(void *data)
......
@@ -382,6 +382,8 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
  int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
  int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
+ int setup_vgpu_mmio(struct intel_vgpu *vgpu);
+ void populate_pvinfo_page(struct intel_vgpu *vgpu);
  #include "mpt.h"
......
@@ -239,7 +239,11 @@ static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
  vgpu->resetting = true;
  intel_vgpu_stop_schedule(vgpu);
- if (scheduler->current_vgpu == vgpu) {
+ /*
+  * The current_vgpu will set to NULL after stopping the
+  * scheduler when the reset is triggered by current vgpu.
+  */
+ if (scheduler->current_vgpu == NULL) {
      mutex_unlock(&vgpu->gvt->lock);
      intel_gvt_wait_vgpu_idle(vgpu);
      mutex_lock(&vgpu->gvt->lock);

@@ -247,6 +251,16 @@ static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
  intel_vgpu_reset_execlist(vgpu, bitmap);
+ /* full GPU reset */
+ if (bitmap == 0xff) {
+     mutex_unlock(&vgpu->gvt->lock);
+     intel_vgpu_clean_gtt(vgpu);
+     mutex_lock(&vgpu->gvt->lock);
+     setup_vgpu_mmio(vgpu);
+     populate_pvinfo_page(vgpu);
+     intel_vgpu_init_gtt(vgpu);
+ }
  vgpu->resetting = false;
  return 0;

@@ -258,6 +272,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  u32 data;
  u64 bitmap = 0;
+ write_vreg(vgpu, offset, p_data, bytes);
  data = vgpu_vreg(vgpu, offset);
  if (data & GEN6_GRDOM_FULL) {

@@ -1305,7 +1320,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
  struct intel_vgpu_execlist *execlist;
  u32 data = *(u32 *)p_data;
- int ret;
+ int ret = 0;
  if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
      return -EINVAL;

@@ -1313,12 +1328,15 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  execlist = &vgpu->execlist[ring_id];
  execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
- if (execlist->elsp_dwords.index == 3)
+ if (execlist->elsp_dwords.index == 3) {
      ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+     if(ret)
+         gvt_err("fail submit workload on ring %d\n", ring_id);
+ }
  ++execlist->elsp_dwords.index;
  execlist->elsp_dwords.index &= 0x3;
- return 0;
+ return ret;
  }
  static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
......
@@ -163,7 +163,7 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
   */
  void intel_gvt_clean_opregion(struct intel_gvt *gvt)
  {
-     iounmap(gvt->opregion.opregion_va);
+     memunmap(gvt->opregion.opregion_va);
      gvt->opregion.opregion_va = NULL;
  }

@@ -181,8 +181,8 @@ int intel_gvt_init_opregion(struct intel_gvt *gvt)
  pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
          &gvt->opregion.opregion_pa);
- gvt->opregion.opregion_va = acpi_os_ioremap(gvt->opregion.opregion_pa,
-         INTEL_GVT_OPREGION_SIZE);
+ gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
+                                      INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
  if (!gvt->opregion.opregion_va) {
      gvt_err("fail to map host opregion\n");
      return -EFAULT;
......
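The opregion lives in ordinary system RAM rather than device MMIO, which is why the hunk above switches from acpi_os_ioremap()/iounmap() to memremap()/memunmap(). A hedged, minimal sketch of that mapping pattern (the helper names are illustrative):

    #include <linux/io.h>

    /* memremap() returns a plain kernel pointer (not __iomem), so the
     * matching release call is memunmap() rather than iounmap().
     */
    static void *map_opregion(resource_size_t pa, size_t size)
    {
            return memremap(pa, size, MEMREMAP_WB); /* NULL on failure */
    }

    static void unmap_opregion(void *va)
    {
            memunmap(va);
    }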
@@ -118,6 +118,7 @@ static u32 gen9_render_mocs_L3[32];
  static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
  {
      struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+     enum forcewake_domains fw;
      i915_reg_t reg;
      u32 regs[] = {
          [RCS] = 0x4260,

@@ -135,11 +136,25 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
  reg = _MMIO(regs[ring_id]);
- I915_WRITE(reg, 0x1);
+ /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+  * we need to put a forcewake when invalidating RCS TLB caches,
+  * otherwise device can go to RC6 state and interrupt invalidation
+  * process
+  */
+ fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+                                     FW_REG_READ | FW_REG_WRITE);
+ if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+     fw |= FORCEWAKE_RENDER;
+ intel_uncore_forcewake_get(dev_priv, fw);
+ I915_WRITE_FW(reg, 0x1);
- if (wait_for_atomic((I915_READ(reg) == 0), 50))
+ if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
      gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ intel_uncore_forcewake_put(dev_priv, fw);
  gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
  }

@@ -162,6 +177,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
  if (!IS_SKYLAKE(dev_priv))
      return;
+ offset.reg = regs[ring_id];
  for (i = 0; i < 64; i++) {
      gen9_render_mocs[ring_id][i] = I915_READ(offset);
      I915_WRITE(offset, vgpu_vreg(vgpu, offset));

@@ -199,6 +215,7 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
  if (!IS_SKYLAKE(dev_priv))
      return;
+ offset.reg = regs[ring_id];
  for (i = 0; i < 64; i++) {
      vgpu_vreg(vgpu, offset) = I915_READ(offset);
      I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
......
@@ -400,21 +400,27 @@ static int workload_thread(void *priv)
  int ring_id = p->ring_id;
  struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
  struct intel_vgpu_workload *workload = NULL;
+ long lret;
  int ret;
  bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
  kfree(p);
  gvt_dbg_core("workload thread for ring %d started\n", ring_id);
  while (!kthread_should_stop()) {
-     ret = wait_event_interruptible(scheduler->waitq[ring_id],
-             kthread_should_stop() ||
-             (workload = pick_next_workload(gvt, ring_id)));
-     WARN_ON_ONCE(ret);
-     if (kthread_should_stop())
+     add_wait_queue(&scheduler->waitq[ring_id], &wait);
+     do {
+         workload = pick_next_workload(gvt, ring_id);
+         if (workload)
+             break;
+         wait_woken(&wait, TASK_INTERRUPTIBLE,
+                    MAX_SCHEDULE_TIMEOUT);
+     } while (!kthread_should_stop());
+     remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+     if (!workload)
          break;
      mutex_lock(&scheduler_mutex);

@@ -444,10 +450,12 @@ static int workload_thread(void *priv)
  gvt_dbg_sched("ring id %d wait workload %p\n",
          workload->ring_id, workload);
- workload->status = i915_wait_request(workload->req,
-                                      0, NULL, NULL);
- if (workload->status != 0)
+ lret = i915_wait_request(workload->req,
+                          0, MAX_SCHEDULE_TIMEOUT);
+ if (lret < 0) {
+     workload->status = lret;
      gvt_err("fail to wait workload, skip\n");
+ }
  complete:
  gvt_dbg_sched("will complete workload %p\n, status: %d\n",
......
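The workload thread above moves from wait_event_interruptible() to the open-coded add_wait_queue()/wait_woken()/remove_wait_queue() idiom, because the wakeup condition is a function call with side effects. A hedged, generic sketch of that pattern (the wait queue and fetch_item() are hypothetical placeholders, not the driver's API):

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    extern void *fetch_item(void);          /* hypothetical work source */

    static int consumer_thread(void *data)
    {
            wait_queue_head_t *wq = data;   /* hypothetical wait queue */
            DEFINE_WAIT_FUNC(wait, woken_wake_function);

            while (!kthread_should_stop()) {
                    void *item;

                    add_wait_queue(wq, &wait);
                    do {
                            item = fetch_item();
                            if (item)
                                    break;
                            /* sleeps; wakes even if the wakeup raced the check */
                            wait_woken(&wait, TASK_INTERRUPTIBLE,
                                       MAX_SCHEDULE_TIMEOUT);
                    } while (!kthread_should_stop());
                    remove_wait_queue(wq, &wait);

                    if (!item)
                            break;
                    /* ... process item ... */
            }
            return 0;
    }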
@@ -41,7 +41,7 @@ static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
  vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
  }
- static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
+ int setup_vgpu_mmio(struct intel_vgpu *vgpu)
  {
  struct intel_gvt *gvt = vgpu->gvt;
  const struct intel_gvt_device_info *info = &gvt->device_info;

@@ -103,7 +103,7 @@ static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
  }
  }
- static void populate_pvinfo_page(struct intel_vgpu *vgpu)
+ void populate_pvinfo_page(struct intel_vgpu *vgpu)
  {
  /* setup the ballooning information */
  vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
......
@@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
  }
  if (ret == 0 && needs_clflush_after)
-     drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+     drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
  i915_gem_object_unpin_map(shadow_batch_obj);
  return ret;
......
This diff is collapsed.
@@ -537,14 +537,17 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
  .can_switch = i915_switcheroo_can_switch,
  };
- static void i915_gem_fini(struct drm_device *dev)
+ static void i915_gem_fini(struct drm_i915_private *dev_priv)
  {
-     mutex_lock(&dev->struct_mutex);
-     i915_gem_cleanup_engines(dev);
-     i915_gem_context_fini(dev);
-     mutex_unlock(&dev->struct_mutex);
+     mutex_lock(&dev_priv->drm.struct_mutex);
+     i915_gem_cleanup_engines(&dev_priv->drm);
+     i915_gem_context_fini(&dev_priv->drm);
+     mutex_unlock(&dev_priv->drm.struct_mutex);
+     rcu_barrier();
+     flush_work(&dev_priv->mm.free_work);
-     WARN_ON(!list_empty(&to_i915(dev)->context_list));
+     WARN_ON(!list_empty(&dev_priv->context_list));
  }
  static int i915_load_modeset_init(struct drm_device *dev)

@@ -592,7 +595,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
  /* Important: The output setup functions called by modeset_init need
   * working irqs for e.g. gmbus and dp aux transfers. */
- intel_modeset_init(dev);
+ ret = intel_modeset_init(dev);
+ if (ret)
+     goto cleanup_irq;
  intel_guc_init(dev);

@@ -619,7 +624,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
  cleanup_gem:
  if (i915_gem_suspend(dev))
      DRM_ERROR("failed to idle hardware; continuing to unload!\n");
- i915_gem_fini(dev);
+ i915_gem_fini(dev_priv);
  cleanup_irq:
  intel_guc_fini(dev);
  drm_irq_uninstall(dev);

@@ -825,10 +830,13 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
  intel_init_dpio(dev_priv);
  intel_power_domains_init(dev_priv);
  intel_irq_init(dev_priv);
+ intel_hangcheck_init(dev_priv);
  intel_init_display_hooks(dev_priv);
  intel_init_clock_gating_hooks(dev_priv);
  intel_init_audio_hooks(dev_priv);
- i915_gem_load_init(&dev_priv->drm);
+ ret = i915_gem_load_init(&dev_priv->drm);
+ if (ret < 0)
+     goto err_gvt;
  intel_display_crc_init(dev_priv);

@@ -838,6 +846,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
  return 0;
+ err_gvt:
+     intel_gvt_cleanup(dev_priv);
  err_workqueues:
  i915_workqueues_cleanup(dev_priv);
  return ret;

@@ -972,7 +982,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
  static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
  {
      struct pci_dev *pdev = dev_priv->drm.pdev;
-     struct drm_device *dev = &dev_priv->drm;
      int ret;
  if (i915_inject_load_failure())

@@ -1030,7 +1039,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
  * behaviour if any general state is accessed within a page above 4GB,
  * which also needs to be handled carefully.
  */
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+ if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
      ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
      if (ret) {

@@ -1111,6 +1120,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
  /* Reveal our presence to userspace */
  if (drm_dev_register(dev, 0) == 0) {
      i915_debugfs_register(dev_priv);
+     i915_guc_register(dev_priv);
      i915_setup_sysfs(dev_priv);
  } else
      DRM_ERROR("Failed to register driver for userspace access!\n");

@@ -1149,6 +1159,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
  intel_opregion_unregister(dev_priv);
  i915_teardown_sysfs(dev_priv);
+ i915_guc_unregister(dev_priv);
  i915_debugfs_unregister(dev_priv);
  drm_dev_unregister(&dev_priv->drm);

@@ -1303,7 +1314,7 @@ void i915_driver_unload(struct drm_device *dev)
  drain_workqueue(dev_priv->wq);
  intel_guc_fini(dev);
- i915_gem_fini(dev);
+ i915_gem_fini(dev_priv);
  intel_fbc_cleanup_cfb(dev_priv);
  intel_power_domains_fini(dev_priv);

@@ -1425,7 +1436,7 @@ static int i915_drm_suspend(struct drm_device *dev)
  intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev);
+ intel_suspend_hw(dev_priv);
  i915_gem_suspend_gtt_mappings(dev);

@@ -1589,6 +1600,8 @@ static int i915_drm_resume(struct drm_device *dev)
  intel_display_resume(dev);
+ drm_kms_helper_poll_enable(dev);
  /*
   * ... but also need to make sure that hotplug processing
   * doesn't cause havoc. Like in the driver load code we don't

@@ -1596,8 +1609,6 @@ static int i915_drm_resume(struct drm_device *dev)
   * notifications.
   * */
  intel_hpd_init(dev_priv);
- /* Config may have changed between suspend and resume */
- drm_helper_hpd_irq_event(dev);
  intel_opregion_register(dev_priv);

@@ -1610,7 +1621,6 @@ static int i915_drm_resume(struct drm_device *dev)
  intel_opregion_notify_adapter(dev_priv, PCI_D0);
  intel_autoenable_gt_powersave(dev_priv);
- drm_kms_helper_poll_enable(dev);
  enable_rpm_wakeref_asserts(dev_priv);

@@ -2254,7 +2264,6 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
  static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                bool rpm_resume)
  {
-     struct drm_device *dev = &dev_priv->drm;
      int err;
      int ret;

@@ -2278,10 +2287,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
  vlv_check_no_gt_access(dev_priv);
- if (rpm_resume) {
-     intel_init_clock_gating(dev);
-     i915_gem_restore_fences(dev);
- }
+ if (rpm_resume)
+     intel_init_clock_gating(dev_priv);
  return ret;
  }

@@ -2301,32 +2308,13 @@ static int intel_runtime_suspend(struct device *kdev)
  DRM_DEBUG_KMS("Suspending device\n");
- /*
-  * We could deadlock here in case another thread holding struct_mutex
-  * calls RPM suspend concurrently, since the RPM suspend will wait
-  * first for this RPM suspend to finish. In this case the concurrent
-  * RPM resume will be followed by its RPM suspend counterpart. Still
-  * for consistency return -EAGAIN, which will reschedule this suspend.
-  */
- if (!mutex_trylock(&dev->struct_mutex)) {
-     DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
-     /*
-      * Bump the expiration timestamp, otherwise the suspend won't
-      * be rescheduled.
-      */
-     pm_runtime_mark_last_busy(kdev);
-     return -EAGAIN;
- }
  disable_rpm_wakeref_asserts(dev_priv);
  /*
   * We are safe here against re-faults, since the fault handler takes
   * an RPM reference.
   */
- i915_gem_release_all_mmaps(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_runtime_suspend(dev_priv);
  intel_guc_suspend(dev);

@@ -2591,7 +2579,7 @@ static struct drm_driver driver = {
  .set_busid = drm_pci_set_busid,
  .gem_close_object = i915_gem_close_object,
- .gem_free_object = i915_gem_free_object,
+ .gem_free_object_unlocked = i915_gem_free_object,
  .gem_vm_ops = &i915_gem_vm_ops,
  .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
......
This diff is collapsed.
This diff is collapsed.
@@ -31,4 +31,6 @@
  #define GEM_BUG_ON(expr)
  #endif
+ #define I915_NUM_ENGINES 5
  #endif /* __I915_GEM_H__ */
@@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
  list_for_each_entry_safe(obj, next,
               &pool->cache_list[n],
               batch_pool_link)
-     i915_gem_object_put(obj);
+     __i915_gem_object_release_unless_active(obj);
  INIT_LIST_HEAD(&pool->cache_list[n]);
  }

@@ -97,9 +97,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
  size_t size)
  {
  struct drm_i915_gem_object *obj = NULL;
- struct drm_i915_gem_object *tmp, *next;
+ struct drm_i915_gem_object *tmp;
  struct list_head *list;
- int n;
+ int n, ret;
  lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

@@ -112,40 +112,35 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
  n = ARRAY_SIZE(pool->cache_list) - 1;
  list = &pool->cache_list[n];
- list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
+ list_for_each_entry(tmp, list, batch_pool_link) {
      /* The batches are strictly LRU ordered */
-     if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
-                                  &tmp->base.dev->struct_mutex))
+     if (i915_gem_object_is_active(tmp))
          break;
-     /* While we're looping, do some clean up */
-     if (tmp->madv == __I915_MADV_PURGED) {
-         list_del(&tmp->batch_pool_link);
-         i915_gem_object_put(tmp);
-         continue;
-     }
+     GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
+                                                      true));
      if (tmp->base.size >= size) {
+         /* Clear the set of shared fences early */
+         ww_mutex_lock(&tmp->resv->lock, NULL);
+         reservation_object_add_excl_fence(tmp->resv, NULL);
+         ww_mutex_unlock(&tmp->resv->lock);
          obj = tmp;
          break;
      }
  }
  if (obj == NULL) {
-     int ret;
-     obj = i915_gem_object_create(&pool->engine->i915->drm, size);
+     obj = i915_gem_object_create_internal(pool->engine->i915, size);
      if (IS_ERR(obj))
          return obj;
-     ret = i915_gem_object_get_pages(obj);
-     if (ret)
-         return ERR_PTR(ret);
-     obj->madv = I915_MADV_DONTNEED;
  }
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+     return ERR_PTR(ret);
  list_move_tail(&obj->batch_pool_link, list);
- i915_gem_object_pin_pages(obj);
  return obj;
  }
@@ -155,9 +155,10 @@ void i915_gem_context_free(struct kref *ctx_ref)
  if (ce->ring)
      intel_ring_free(ce->ring);
- i915_vma_put(ce->state);
+ __i915_gem_object_release_unless_active(ce->state->obj);
  }
+ kfree(ctx->name);
  put_pid(ctx->pid);
  list_del(&ctx->link);

@@ -303,19 +304,28 @@ __create_hw_context(struct drm_device *dev,
  }
  /* Default context will never have a file_priv */
- if (file_priv != NULL) {
+ ret = DEFAULT_CONTEXT_HANDLE;
+ if (file_priv) {
      ret = idr_alloc(&file_priv->context_idr, ctx,
              DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
      if (ret < 0)
          goto err_out;
- } else
-     ret = DEFAULT_CONTEXT_HANDLE;
+ }
+ ctx->user_handle = ret;
  ctx->file_priv = file_priv;
- if (file_priv)
+ if (file_priv) {
      ctx->pid = get_task_pid(current, PIDTYPE_PID);
+     ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
+                           current->comm,
+                           pid_nr(ctx->pid),
+                           ctx->user_handle);
+     if (!ctx->name) {
+         ret = -ENOMEM;
+         goto err_pid;
+     }
+ }
- ctx->user_handle = ret;
  /* NB: Mark all slices as needing a remap so that when the context first
   * loads it will restore whatever remap state already exists. If there
   * is no remap info, it will be a NOP. */

@@ -329,6 +339,9 @@ __create_hw_context(struct drm_device *dev,
  return ctx;
+ err_pid:
+     put_pid(ctx->pid);
+     idr_remove(&file_priv->context_idr, ctx->user_handle);
  err_out:
  context_close(ctx);
  return ERR_PTR(ret);

@@ -352,9 +365,9 @@ i915_gem_create_context(struct drm_device *dev,
  return ctx;
  if (USES_FULL_PPGTT(dev)) {
-     struct i915_hw_ppgtt *ppgtt =
-         i915_ppgtt_create(to_i915(dev), file_priv);
+     struct i915_hw_ppgtt *ppgtt;
+     ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
      if (IS_ERR(ppgtt)) {
          DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                  PTR_ERR(ppgtt));

@@ -751,12 +764,36 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
  return false;
  }
+ struct i915_vma *
+ i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+                             unsigned int flags)
+ {
+     struct i915_vma *vma = ctx->engine[RCS].state;
+     int ret;
+     /* Clear this page out of any CPU caches for coherent swap-in/out.
+      * We only want to do this on the first bind so that we do not stall
+      * on an active context (which by nature is already on the GPU).
+      */
+     if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+         ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+         if (ret)
+             return ERR_PTR(ret);
+     }
+     ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
+     if (ret)
+         return ERR_PTR(ret);
+     return vma;
+ }
  static int do_rcs_switch(struct drm_i915_gem_request *req)
  {
  struct i915_gem_context *to = req->ctx;
  struct intel_engine_cs *engine = req->engine;
  struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- struct i915_vma *vma = to->engine[RCS].state;
+ struct i915_vma *vma;
  struct i915_gem_context *from;
  u32 hw_flags;
  int ret, i;

@@ -764,17 +801,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
  if (skip_rcs_switch(ppgtt, engine, to))
      return 0;
- /* Clear this page out of any CPU caches for coherent swap-in/out. */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-     ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
-     if (ret)
-         return ret;
- }
  /* Trying to pin first makes error handling easier. */
- ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
- if (ret)
-     return ret;
+ vma = i915_gem_context_pin_legacy(to, 0);
+ if (IS_ERR(vma))
+     return PTR_ERR(vma);
  /*
   * Pin can switch back to the default context if we end up calling into

@@ -931,22 +961,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
  int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
  {
  struct intel_engine_cs *engine;
+ struct i915_gem_timeline *timeline;
  enum intel_engine_id id;
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
  for_each_engine(engine, dev_priv, id) {
      struct drm_i915_gem_request *req;
      int ret;
-     if (engine->last_context == NULL)
-         continue;
-     if (engine->last_context == dev_priv->kernel_context)
-         continue;
      req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
      if (IS_ERR(req))
          return PTR_ERR(req);
+     /* Queue this switch after all other activity */
+     list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+         struct drm_i915_gem_request *prev;
+         struct intel_timeline *tl;
+         tl = &timeline->engine[engine->id];
+         prev = i915_gem_active_raw(&tl->last_request,
+                                    &dev_priv->drm.struct_mutex);
+         if (prev)
+             i915_sw_fence_await_sw_fence_gfp(&req->submit,
+                                              &prev->submit,
+                                              GFP_KERNEL);
+     }
      ret = i915_switch_context(req);
      i915_add_request_no_flush(req);
      if (ret)
......
@@ -44,51 +44,42 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
  struct scatterlist *src, *dst;
  int ret, i;
- ret = i915_mutex_lock_interruptible(obj->base.dev);
+ ret = i915_gem_object_pin_pages(obj);
  if (ret)
      goto err;
- ret = i915_gem_object_get_pages(obj);
- if (ret)
-     goto err_unlock;
- i915_gem_object_pin_pages(obj);
  /* Copy sg so that we make an independent mapping */
  st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
  if (st == NULL) {
      ret = -ENOMEM;
-     goto err_unpin;
+     goto err_unpin_pages;
  }
- ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
  if (ret)
      goto err_free;
- src = obj->pages->sgl;
+ src = obj->mm.pages->sgl;
  dst = st->sgl;
- for (i = 0; i < obj->pages->nents; i++) {
+ for (i = 0; i < obj->mm.pages->nents; i++) {
      sg_set_page(dst, sg_page(src), src->length, 0);
      dst = sg_next(dst);
      src = sg_next(src);
  }
  if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
-     ret =-ENOMEM;
+     ret = -ENOMEM;
      goto err_free_sg;
  }
- mutex_unlock(&obj->base.dev->struct_mutex);
  return st;
  err_free_sg:
  sg_free_table(st);
  err_free:
  kfree(st);
- err_unpin:
+ err_unpin_pages:
  i915_gem_object_unpin_pages(obj);
- err_unlock:
- mutex_unlock(&obj->base.dev->struct_mutex);
  err:
  return ERR_PTR(ret);
  }

@@ -103,36 +94,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
  sg_free_table(sg);
  kfree(sg);
- mutex_lock(&obj->base.dev->struct_mutex);
  i915_gem_object_unpin_pages(obj);
- mutex_unlock(&obj->base.dev->struct_mutex);
  }
  static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
  {
  struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
- void *addr;
- int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
-     return ERR_PTR(ret);
- addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- mutex_unlock(&dev->struct_mutex);
- return addr;
+ return i915_gem_object_pin_map(obj, I915_MAP_WB);
  }
  static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
  {
  struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
- mutex_lock(&dev->struct_mutex);
  i915_gem_object_unpin_map(obj);
- mutex_unlock(&dev->struct_mutex);
  }
  static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)

@@ -179,32 +155,45 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
  {
  struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
  struct drm_device *dev = obj->base.dev;
- int ret;
  bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+ int err;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
-     return ret;
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+     return err;
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+     goto out;
- ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ err = i915_gem_object_set_to_cpu_domain(obj, write);
  mutex_unlock(&dev->struct_mutex);
- return ret;
+ out:
+ i915_gem_object_unpin_pages(obj);
+ return err;
  }
  static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
  {
  struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
  struct drm_device *dev = obj->base.dev;
- int ret;
+ int err;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
-     return ret;
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+     return err;
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+     goto out;
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
  mutex_unlock(&dev->struct_mutex);
- return ret;
+ out:
+ i915_gem_object_unpin_pages(obj);
+ return err;
  }
  static const struct dma_buf_ops i915_dmabuf_ops = {

@@ -222,60 +211,17 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
  .end_cpu_access = i915_gem_end_cpu_access,
  };
- static void export_fences(struct drm_i915_gem_object *obj,
-                           struct dma_buf *dma_buf)
- {
-     struct reservation_object *resv = dma_buf->resv;
-     struct drm_i915_gem_request *req;
-     unsigned long active;
-     int idx;
-     active = __I915_BO_ACTIVE(obj);
-     if (!active)
-         return;
-     /* Serialise with execbuf to prevent concurrent fence-loops */
-     mutex_lock(&obj->base.dev->struct_mutex);
-     /* Mark the object for future fences before racily adding old fences */
-     obj->base.dma_buf = dma_buf;
-     ww_mutex_lock(&resv->lock, NULL);
-     for_each_active(active, idx) {
-         req = i915_gem_active_get(&obj->last_read[idx],
-                                   &obj->base.dev->struct_mutex);
-         if (!req)
-             continue;
-         if (reservation_object_reserve_shared(resv) == 0)
-             reservation_object_add_shared_fence(resv, &req->fence);
-         i915_gem_request_put(req);
-     }
-     req = i915_gem_active_get(&obj->last_write,
-                               &obj->base.dev->struct_mutex);
-     if (req) {
-         reservation_object_add_excl_fence(resv, &req->fence);
-         i915_gem_request_put(req);
-     }
-     ww_mutex_unlock(&resv->lock);
-     mutex_unlock(&obj->base.dev->struct_mutex);
- }
  struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                struct drm_gem_object *gem_obj, int flags)
  {
  struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
  DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *dma_buf;
  exp_info.ops = &i915_dmabuf_ops;
  exp_info.size = gem_obj->size;
  exp_info.flags = flags;
  exp_info.priv = gem_obj;
+ exp_info.resv = obj->resv;
  if (obj->ops->dmabuf_export) {
      int ret = obj->ops->dmabuf_export(obj);

@@ -283,30 +229,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
      return ERR_PTR(ret);
  }
- dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
- if (IS_ERR(dma_buf))
-     return dma_buf;
- export_fences(obj, dma_buf);
- return dma_buf;
+ return drm_gem_dmabuf_export(dev, &exp_info);
  }
- static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+ static struct sg_table *
+ i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
  {
-     struct sg_table *sg;
-     sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
-     if (IS_ERR(sg))
-         return PTR_ERR(sg);
-     obj->pages = sg;
-     return 0;
+     return dma_buf_map_attachment(obj->base.import_attach,
+                                   DMA_BIDIRECTIONAL);
  }
- static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
+                                              struct sg_table *pages)
  {
-     dma_buf_unmap_attachment(obj->base.import_attach,
-                              obj->pages, DMA_BIDIRECTIONAL);
+     dma_buf_unmap_attachment(obj->base.import_attach, pages,
+                              DMA_BIDIRECTIONAL);
  }
  static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {

@@ -350,6 +287,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
  drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
  i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
  obj->base.import_attach = attach;
+ obj->resv = dma_buf->resv;
  /* We use GTT as shorthand for a coherent domain, one that is
   * neither in the GPU cache nor in the CPU cache, where all
......
@@ -33,14 +33,17 @@
  #include "intel_drv.h"
  #include "i915_trace.h"
- static bool
- gpu_is_idle(struct drm_i915_private *dev_priv)
+ static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
  {
+     struct i915_ggtt *ggtt = &dev_priv->ggtt;
      struct intel_engine_cs *engine;
      enum intel_engine_id id;
      for_each_engine(engine, dev_priv, id) {
-         if (intel_engine_is_active(engine))
+         struct intel_timeline *tl;
+         tl = &ggtt->base.timeline.engine[engine->id];
+         if (i915_gem_active_isset(&tl->last_request))
              return false;
      }

@@ -56,7 +59,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
  if (WARN_ON(!list_empty(&vma->exec_list)))
      return false;
- if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+ if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
      return false;
  list_add(&vma->exec_list, unwind);

@@ -103,6 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
  struct i915_vma *vma, *next;
  int ret;
+ lockdep_assert_held(&vm->dev->struct_mutex);
  trace_i915_gem_evict(vm, min_size, alignment, flags);
  /*

@@ -153,7 +157,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
  if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
      return -ENOSPC;
- if (gpu_is_idle(dev_priv)) {
+ if (ggtt_is_idle(dev_priv)) {
      /* If we still have pending pageflip completions, drop
       * back to userspace to give our workqueues time to
       * acquire our locks and unpin the old scanouts.

@@ -213,6 +217,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
  {
  struct drm_mm_node *node, *next;
+ lockdep_assert_held(&target->vm->dev->struct_mutex);
  list_for_each_entry_safe(node, next,
               &target->vm->mm.head_node.node_list,
               node_list) {

@@ -266,7 +272,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
  struct i915_vma *vma, *next;
  int ret;
- WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
+ lockdep_assert_held(&vm->dev->struct_mutex);
  trace_i915_gem_evict_vm(vm);
  if (do_idle) {
......
@@ -34,7 +34,6 @@
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
- #include "i915_gem_dmabuf.h"
  #include "i915_trace.h"
  #include "intel_drv.h"
  #include "intel_frontbuffer.h"

@@ -332,7 +331,8 @@ static void reloc_cache_init(struct reloc_cache *cache,
  cache->page = -1;
  cache->vaddr = 0;
  cache->i915 = i915;
- cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+ /* Must be a variable in the struct to allow GCC to unroll. */
+ cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
  cache->node.allocated = false;
  }

@@ -418,15 +418,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
  unsigned long offset;
  void *vaddr;
- if (cache->node.allocated) {
-     wmb();
-     ggtt->base.insert_page(&ggtt->base,
-                            i915_gem_object_get_dma_address(obj, page),
-                            cache->node.start, I915_CACHE_NONE, 0);
-     cache->page = page;
-     return unmask_page(cache->vaddr);
- }
  if (cache->vaddr) {
      io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
  } else {

@@ -466,6 +457,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
  offset = cache->node.start;
  if (cache->node.allocated) {
+     wmb();
      ggtt->base.insert_page(&ggtt->base,
                             i915_gem_object_get_dma_address(obj, page),
                             offset, I915_CACHE_NONE, 0);

@@ -1109,44 +1101,20 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
  return ret;
  }
- static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
- {
-     unsigned int mask;
-     mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
-     mask <<= I915_BO_ACTIVE_SHIFT;
-     return mask;
- }
  static int
  i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                  struct list_head *vmas)
  {
-     const unsigned int other_rings = eb_other_engines(req);
      struct i915_vma *vma;
      int ret;
      list_for_each_entry(vma, vmas, exec_list) {
          struct drm_i915_gem_object *obj = vma->obj;
-         struct reservation_object *resv;
-         if (obj->flags & other_rings) {
-             ret = i915_gem_request_await_object
-                 (req, obj, obj->base.pending_write_domain);
-             if (ret)
-                 return ret;
-         }
-         resv = i915_gem_object_get_dmabuf_resv(obj);
-         if (resv) {
-             ret = i915_sw_fence_await_reservation
-                 (&req->submit, resv, &i915_fence_ops,
-                  obj->base.pending_write_domain, 10*HZ,
-                  GFP_KERNEL | __GFP_NOWARN);
-             if (ret < 0)
-                 return ret;
-         }
+         ret = i915_gem_request_await_object
+             (req, obj, obj->base.pending_write_domain);
+         if (ret)
+             return ret;
          if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
              i915_gem_clflush_object(obj, false);

@@ -1279,6 +1247,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
  return ctx;
  }
+ static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+ {
+     return !(obj->cache_level == I915_CACHE_NONE ||
+              obj->cache_level == I915_CACHE_WT);
+ }
  void i915_vma_move_to_active(struct i915_vma *vma,
                   struct drm_i915_gem_request *req,
                   unsigned int flags)

@@ -1288,8 +1262,6 @@ void i915_vma_move_to_active(struct i915_vma *vma,
  GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- obj->dirty = 1; /* be paranoid */
  /* Add a reference if we're newly entering the active list.
   * The order in which we add operations to the retirement queue is
   * vital here: mark_active adds to the start of the callback list,

@@ -1297,37 +1269,32 @@ void i915_vma_move_to_active(struct i915_vma *vma,
   * add the active reference first and queue for it to be dropped
   * *last*.
   */
- if (!i915_gem_object_is_active(obj))
-     i915_gem_object_get(obj);
- i915_gem_object_set_active(obj, idx);
+ if (!i915_vma_is_active(vma))
+     obj->active_count++;
+ i915_vma_set_active(vma, idx);
i915_gem_active_set(&obj->last_read[idx], req); i915_gem_active_set(&vma->last_read[idx], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
if (flags & EXEC_OBJECT_WRITE) { if (flags & EXEC_OBJECT_WRITE) {
i915_gem_active_set(&obj->last_write, req); i915_gem_active_set(&vma->last_write, req);
intel_fb_obj_invalidate(obj, ORIGIN_CS); intel_fb_obj_invalidate(obj, ORIGIN_CS);
/* update for the implicit flush after a batch */ /* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
obj->cache_dirty = true;
} }
if (flags & EXEC_OBJECT_NEEDS_FENCE) if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_gem_active_set(&vma->last_fence, req); i915_gem_active_set(&vma->last_fence, req);
i915_vma_set_active(vma, idx);
i915_gem_active_set(&vma->last_read[idx], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
} }
static void eb_export_fence(struct drm_i915_gem_object *obj, static void eb_export_fence(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req, struct drm_i915_gem_request *req,
unsigned int flags) unsigned int flags)
{ {
struct reservation_object *resv; struct reservation_object *resv = obj->resv;
resv = i915_gem_object_get_dmabuf_resv(obj);
if (!resv)
return;
/* Ignore errors from failing to allocate the new fence, we can't /* Ignore errors from failing to allocate the new fence, we can't
* handle an error right now. Worst case should be missed * handle an error right now. Worst case should be missed
......
...@@ -343,6 +343,9 @@ i915_vma_get_fence(struct i915_vma *vma) ...@@ -343,6 +343,9 @@ i915_vma_get_fence(struct i915_vma *vma)
struct drm_i915_fence_reg *fence; struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
/* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
assert_rpm_wakelock_held(to_i915(vma->vm->dev)); assert_rpm_wakelock_held(to_i915(vma->vm->dev));
/* Just update our place in the LRU if our fence is getting reused. */ /* Just update our place in the LRU if our fence is getting reused. */
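The comment added above spells out the new contract: fences are revoked across runtime suspend, so the user must hold a runtime-PM wakeref for as long as the fence is relied upon. A minimal sketch of that pattern, assuming the caller already holds struct_mutex and has a pinned vma (illustrative only, not part of this series):

	int err;

	intel_runtime_pm_get(dev_priv);		/* keep the device awake while the fence is in use */
	err = i915_vma_get_fence(vma);
	if (err == 0) {
		/* access the object through its fenced GTT mapping here */
	}
	intel_runtime_pm_put(dev_priv);		/* once asleep, the fence may be revoked */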
...@@ -368,19 +371,14 @@ i915_vma_get_fence(struct i915_vma *vma) ...@@ -368,19 +371,14 @@ i915_vma_get_fence(struct i915_vma *vma)
* @dev: DRM device * @dev: DRM device
* *
* Restore the hw fence state to match the software tracking again, to be called * Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. * after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/ */
void i915_gem_restore_fences(struct drm_device *dev) void i915_gem_restore_fences(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
int i; int i;
/* Note that this may be called outside of struct_mutex, by
* runtime suspend/resume. The barrier we require is enforced by
* rpm itself - all access to fences/GTT are only within an rpm
* wakeref, and to acquire that wakeref you must pass through here.
*/
for (i = 0; i < dev_priv->num_fence_regs; i++) { for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma; struct i915_vma *vma = reg->vma;
...@@ -391,7 +389,7 @@ void i915_gem_restore_fences(struct drm_device *dev) ...@@ -391,7 +389,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
*/ */
if (vma && !i915_gem_object_is_tiled(vma->obj)) { if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty); GEM_BUG_ON(!reg->dirty);
GEM_BUG_ON(vma->obj->fault_mappable); GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
list_move(&reg->link, &dev_priv->mm.fence_list); list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL; vma->fence = NULL;
...@@ -646,6 +644,7 @@ i915_gem_swizzle_page(struct page *page) ...@@ -646,6 +644,7 @@ i915_gem_swizzle_page(struct page *page)
/** /**
* i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
* @obj: i915 GEM buffer object * @obj: i915 GEM buffer object
* @pages: the scattergather list of physical pages
* *
* This function fixes up the swizzling in case any page frame number for this * This function fixes up the swizzling in case any page frame number for this
* object has changed in bit 17 since that state has been saved with * object has changed in bit 17 since that state has been saved with
...@@ -656,7 +655,8 @@ i915_gem_swizzle_page(struct page *page) ...@@ -656,7 +655,8 @@ i915_gem_swizzle_page(struct page *page)
* by swapping them out and back in again). * by swapping them out and back in again).
*/ */
void void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
struct sgt_iter sgt_iter; struct sgt_iter sgt_iter;
struct page *page; struct page *page;
...@@ -666,10 +666,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -666,10 +666,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
return; return;
i = 0; i = 0;
for_each_sgt_page(page, sgt_iter, obj->pages) { for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17; char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) != if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page); i915_gem_swizzle_page(page);
set_page_dirty(page); set_page_dirty(page);
} }
...@@ -680,17 +679,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -680,17 +679,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
/** /**
* i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
* @obj: i915 GEM buffer object * @obj: i915 GEM buffer object
* @pages: the scattergather list of physical pages
* *
* This function saves the bit 17 of each page frame number so that swizzling * This function saves the bit 17 of each page frame number so that swizzling
* can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
* be called before the backing storage can be unpinned. * be called before the backing storage can be unpinned.
*/ */
void void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
struct sgt_iter sgt_iter; struct sgt_iter sgt_iter;
struct page *page; struct page *page;
int page_count = obj->base.size >> PAGE_SHIFT;
int i; int i;
if (obj->bit_17 == NULL) { if (obj->bit_17 == NULL) {
...@@ -705,7 +706,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -705,7 +706,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
i = 0; i = 0;
for_each_sgt_page(page, sgt_iter, obj->pages) { for_each_sgt_page(page, sgt_iter, pages) {
if (page_to_phys(page) & (1 << 17)) if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17); __set_bit(i, obj->bit_17);
else else
......
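Both swizzle helpers now take the scatterlist explicitly instead of reaching into obj->pages. The expected pairing around swap-out, with @pages being whatever table the caller is about to release or has just reacquired, is roughly this (a sketch, not taken from the diff):

	i915_gem_object_save_bit_17_swizzle(obj, pages);	/* before the backing store is unpinned */
	/* ... pages may be swapped out and return at different physical addresses ... */
	i915_gem_object_do_bit_17_swizzle(obj, pages);		/* after the pages are re-acquired */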
...@@ -211,6 +211,7 @@ struct i915_vma { ...@@ -211,6 +211,7 @@ struct i915_vma {
unsigned int active; unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES]; struct i915_gem_active last_read[I915_NUM_ENGINES];
struct i915_gem_active last_write;
struct i915_gem_active last_fence; struct i915_gem_active last_fence;
/** /**
...@@ -226,6 +227,7 @@ struct i915_vma { ...@@ -226,6 +227,7 @@ struct i915_vma {
struct list_head vm_link; struct list_head vm_link;
struct list_head obj_link; /* Link in the object's VMA list */ struct list_head obj_link; /* Link in the object's VMA list */
struct rb_node obj_node;
/** This vma's place in the batchbuffer or on the eviction list */ /** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list; struct list_head exec_list;
...@@ -341,6 +343,7 @@ struct i915_pml4 { ...@@ -341,6 +343,7 @@ struct i915_pml4 {
struct i915_address_space { struct i915_address_space {
struct drm_mm mm; struct drm_mm mm;
struct i915_gem_timeline timeline;
struct drm_device *dev; struct drm_device *dev;
/* Every address space belongs to a struct file - except for the global /* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In * GTT that is owned by the driver (and so @file is set to NULL). In
...@@ -612,7 +615,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); ...@@ -612,7 +615,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_device *dev); int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref); void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv, struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *fpriv); struct drm_i915_file_private *fpriv,
const char *name);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{ {
if (ppgtt) if (ppgtt)
...@@ -628,8 +632,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); ...@@ -628,8 +632,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev); void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
/* Flags used by pin/bind&friends. */ /* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0) #define PIN_NONBLOCK BIT(0)
......
/*
* Copyright © 2014-2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
static void internal_free_pages(struct sg_table *st)
{
struct scatterlist *sg;
for (sg = st->sgl; sg; sg = __sg_next(sg))
__free_pages(sg_page(sg), get_order(sg->length));
sg_free_table(st);
kfree(st);
}
static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned int npages = obj->base.size / PAGE_SIZE;
struct sg_table *st;
struct scatterlist *sg;
int max_order;
gfp_t gfp;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, npages, GFP_KERNEL)) {
kfree(st);
return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
st->nents = 0;
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
#endif
gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
/* 965gm cannot relocate objects above 4GiB. */
gfp &= ~__GFP_HIGHMEM;
gfp |= __GFP_DMA32;
}
do {
int order = min(fls(npages) - 1, max_order);
struct page *page;
do {
page = alloc_pages(gfp | (order ? QUIET : 0), order);
if (page)
break;
if (!order--)
goto err;
/* Limit subsequent allocations as well */
max_order = order;
} while (1);
sg_set_page(sg, page, PAGE_SIZE << order, 0);
st->nents++;
npages -= 1 << order;
if (!npages) {
sg_mark_end(sg);
break;
}
sg = __sg_next(sg);
} while (1);
if (i915_gem_gtt_prepare_pages(obj, st))
goto err;
/* Mark the pages as dontneed whilst they are still pinned. As soon
* as they are unpinned they are allowed to be reaped by the shrinker,
* and the caller is expected to repopulate - the contents of this
* object are only valid whilst active and pinned.
*/
obj->mm.madv = I915_MADV_DONTNEED;
return st;
err:
sg_mark_end(sg);
internal_free_pages(st);
return ERR_PTR(-ENOMEM);
}
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
i915_gem_gtt_finish_pages(obj, pages);
internal_free_pages(pages);
obj->mm.dirty = false;
obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_internal,
.put_pages = i915_gem_object_put_pages_internal,
};
/**
* Creates a new object that wraps some internal memory for private use.
* This object is not backed by swappable storage, and as such its contents
* are volatile and only valid whilst pinned. If the object is reaped by the
* shrinker, its pages and data will be discarded. Equally, it is not a full
* GEM object and so not valid for access from userspace. This makes it useful
* for hardware interfaces like ringbuffers (which are pinned from the time
* the request is written to the time the hardware stops accessing it), but
* not for contexts (which need to be preserved when not active for later
* reuse). Note that it is not cleared upon allocation.
*/
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
unsigned int size)
{
struct drm_i915_gem_object *obj;
obj = i915_gem_object_alloc(&i915->drm);
if (!obj)
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
return obj;
}
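This new internal backend hands out volatile, non-shmemfs storage: the pages are marked I915_MADV_DONTNEED while still pinned, so once unpinned the shrinker may reap them and the owner must repopulate (the render-state code later in this diff checks obj->mm.pages for exactly that). A hedged sketch of typical consumption, reusing only helpers that appear elsewhere in this series; the names and the 4096-byte size are illustrative:

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	/* Back a HW-only buffer with internal (unswappable) pages. */
	obj = i915_gem_object_create_internal(i915, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_create(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return PTR_ERR(vma);
	}

	/* Contents are only valid while pinned; re-fill after any shrinker pass. */
	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);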
...@@ -28,17 +28,19 @@ ...@@ -28,17 +28,19 @@
#include "i915_drv.h" #include "i915_drv.h"
#include "intel_renderstate.h" #include "intel_renderstate.h"
struct render_state { struct intel_render_state {
const struct intel_renderstate_rodata *rodata; const struct intel_renderstate_rodata *rodata;
struct i915_vma *vma; struct i915_vma *vma;
u32 aux_batch_size; u32 batch_offset;
u32 aux_batch_offset; u32 batch_size;
u32 aux_offset;
u32 aux_size;
}; };
static const struct intel_renderstate_rodata * static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct drm_i915_gem_request *req) render_state_get_rodata(const struct intel_engine_cs *engine)
{ {
switch (INTEL_GEN(req->i915)) { switch (INTEL_GEN(engine->i915)) {
case 6: case 6:
return &gen6_null_state; return &gen6_null_state;
case 7: case 7:
...@@ -63,29 +65,26 @@ render_state_get_rodata(const struct drm_i915_gem_request *req) ...@@ -63,29 +65,26 @@ render_state_get_rodata(const struct drm_i915_gem_request *req)
*/ */
#define OUT_BATCH(batch, i, val) \ #define OUT_BATCH(batch, i, val) \
do { \ do { \
if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) { \ if ((i) >= PAGE_SIZE / sizeof(u32)) \
ret = -ENOSPC; \ goto err; \
goto err_out; \
} \
(batch)[(i)++] = (val); \ (batch)[(i)++] = (val); \
} while(0) } while(0)
static int render_state_setup(struct render_state *so) static int render_state_setup(struct intel_render_state *so,
struct drm_i915_private *i915)
{ {
struct drm_i915_private *dev_priv = to_i915(so->vma->vm->dev);
const struct intel_renderstate_rodata *rodata = so->rodata; const struct intel_renderstate_rodata *rodata = so->rodata;
const bool has_64bit_reloc = INTEL_GEN(dev_priv) >= 8; struct drm_i915_gem_object *obj = so->vma->obj;
unsigned int i = 0, reloc_index = 0; unsigned int i = 0, reloc_index = 0;
struct page *page; unsigned int needs_clflush;
u32 *d; u32 *d;
int ret; int ret;
ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true); ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
if (ret) if (ret)
return ret; return ret;
page = i915_gem_object_get_dirty_page(so->vma->obj, 0); d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
d = kmap(page);
while (i < rodata->batch_items) { while (i < rodata->batch_items) {
u32 s = rodata->batch[i]; u32 s = rodata->batch[i];
...@@ -93,12 +92,10 @@ static int render_state_setup(struct render_state *so) ...@@ -93,12 +92,10 @@ static int render_state_setup(struct render_state *so)
if (i * 4 == rodata->reloc[reloc_index]) { if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->vma->node.start; u64 r = s + so->vma->node.start;
s = lower_32_bits(r); s = lower_32_bits(r);
if (has_64bit_reloc) { if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items || if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0) { rodata->batch[i + 1] != 0)
ret = -EINVAL; goto err;
goto err_out;
}
d[i++] = s; d[i++] = s;
s = upper_32_bits(r); s = upper_32_bits(r);
...@@ -110,12 +107,20 @@ static int render_state_setup(struct render_state *so) ...@@ -110,12 +107,20 @@ static int render_state_setup(struct render_state *so)
d[i++] = s; d[i++] = s;
} }
if (rodata->reloc[reloc_index] != -1) {
DRM_ERROR("only %d relocs resolved\n", reloc_index);
goto err;
}
so->batch_offset = so->vma->node.start;
so->batch_size = rodata->batch_items * sizeof(u32);
while (i % CACHELINE_DWORDS) while (i % CACHELINE_DWORDS)
OUT_BATCH(d, i, MI_NOOP); OUT_BATCH(d, i, MI_NOOP);
so->aux_batch_offset = i * sizeof(u32); so->aux_offset = i * sizeof(u32);
if (HAS_POOLED_EU(dev_priv)) { if (HAS_POOLED_EU(i915)) {
/* /*
* We always program 3x6 pool config but depending upon which * We always program 3x6 pool config but depending upon which
* subslice is disabled HW drops down to appropriate config * subslice is disabled HW drops down to appropriate config
...@@ -143,88 +148,133 @@ static int render_state_setup(struct render_state *so) ...@@ -143,88 +148,133 @@ static int render_state_setup(struct render_state *so)
} }
OUT_BATCH(d, i, MI_BATCH_BUFFER_END); OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset; so->aux_size = i * sizeof(u32) - so->aux_offset;
so->aux_offset += so->batch_offset;
/* /*
* Since we are sending length, we need to strictly conform to * Since we are sending length, we need to strictly conform to
* all requirements. For Gen2 this must be a multiple of 8. * all requirements. For Gen2 this must be a multiple of 8.
*/ */
so->aux_batch_size = ALIGN(so->aux_batch_size, 8); so->aux_size = ALIGN(so->aux_size, 8);
kunmap(page);
ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
if (ret)
return ret;
if (rodata->reloc[reloc_index] != -1) {
DRM_ERROR("only %d relocs resolved\n", reloc_index);
return -EINVAL;
}
return 0; if (needs_clflush)
drm_clflush_virt_range(d, i * sizeof(u32));
kunmap_atomic(d);
err_out: ret = i915_gem_object_set_to_gtt_domain(obj, false);
kunmap(page); out:
i915_gem_obj_finish_shmem_access(obj);
return ret; return ret;
err:
kunmap_atomic(d);
ret = -EINVAL;
goto out;
} }
#undef OUT_BATCH #undef OUT_BATCH
int i915_gem_render_state_init(struct drm_i915_gem_request *req) int i915_gem_render_state_init(struct intel_engine_cs *engine)
{ {
struct render_state so; struct intel_render_state *so;
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
int ret; int ret;
if (WARN_ON(req->engine->id != RCS)) if (engine->id != RCS)
return -ENOENT; return 0;
so.rodata = render_state_get_rodata(req); rodata = render_state_get_rodata(engine);
if (!so.rodata) if (!rodata)
return 0; return 0;
if (so.rodata->batch_items * 4 > 4096) if (rodata->batch_items * 4 > 4096)
return -EINVAL; return -EINVAL;
obj = i915_gem_object_create(&req->i915->drm, 4096); so = kmalloc(sizeof(*so), GFP_KERNEL);
if (IS_ERR(obj)) if (!so)
return PTR_ERR(obj); return -ENOMEM;
so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL); obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(so.vma)) { if (IS_ERR(obj)) {
ret = PTR_ERR(so.vma); ret = PTR_ERR(obj);
goto err_obj; goto err_free;
} }
ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL); so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
if (ret) if (IS_ERR(so->vma)) {
ret = PTR_ERR(so->vma);
goto err_obj; goto err_obj;
}
so->rodata = rodata;
engine->render_state = so;
return 0;
err_obj:
i915_gem_object_put(obj);
err_free:
kfree(so);
return ret;
}
int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
{
struct intel_render_state *so;
int ret;
lockdep_assert_held(&req->i915->drm.struct_mutex);
ret = render_state_setup(&so); so = req->engine->render_state;
if (!so)
return 0;
/* Recreate the page after shrinking */
if (!so->vma->obj->mm.pages)
so->batch_offset = -1;
ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret) if (ret)
goto err_unpin; return ret;
if (so->vma->node.start != so->batch_offset) {
ret = render_state_setup(so, req->i915);
if (ret)
goto err_unpin;
}
ret = req->engine->emit_bb_start(req, so.vma->node.start, ret = req->engine->emit_bb_start(req,
so.rodata->batch_items * 4, so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE); I915_DISPATCH_SECURE);
if (ret) if (ret)
goto err_unpin; goto err_unpin;
if (so.aux_batch_size > 8) { if (so->aux_size > 8) {
ret = req->engine->emit_bb_start(req, ret = req->engine->emit_bb_start(req,
(so.vma->node.start + so->aux_offset, so->aux_size,
so.aux_batch_offset),
so.aux_batch_size,
I915_DISPATCH_SECURE); I915_DISPATCH_SECURE);
if (ret) if (ret)
goto err_unpin; goto err_unpin;
} }
i915_vma_move_to_active(so.vma, req, 0); i915_vma_move_to_active(so->vma, req, 0);
err_unpin: err_unpin:
i915_vma_unpin(so.vma); i915_vma_unpin(so->vma);
err_obj:
i915_gem_object_put(obj);
return ret; return ret;
} }
void i915_gem_render_state_fini(struct intel_engine_cs *engine)
{
struct intel_render_state *so;
struct drm_i915_gem_object *obj;
so = fetch_and_zero(&engine->render_state);
if (!so)
return;
obj = so->vma->obj;
i915_vma_close(so->vma);
__i915_gem_object_release_unless_active(obj);
kfree(so);
}
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
struct drm_i915_gem_request; struct drm_i915_gem_request;
int i915_gem_render_state_init(struct drm_i915_gem_request *req); int i915_gem_render_state_init(struct intel_engine_cs *engine);
int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct intel_engine_cs *engine);
#endif /* _I915_GEM_RENDER_STATE_H_ */ #endif /* _I915_GEM_RENDER_STATE_H_ */
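The render-state interface is now split across the engine and request lifecycles rather than being driven entirely from a request. A rough sketch of the intended call flow (the placement of the calls is an assumption based on the names, not something this hunk shows):

	/* once, while setting up the render engine */
	err = i915_gem_render_state_init(engine);

	/* whenever a request needs the golden render state, under struct_mutex */
	err = i915_gem_render_state_emit(req);

	/* once, when the engine is torn down */
	i915_gem_render_state_fini(engine);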
...@@ -81,11 +81,14 @@ struct drm_i915_gem_request { ...@@ -81,11 +81,14 @@ struct drm_i915_gem_request {
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct intel_ring *ring; struct intel_ring *ring;
struct intel_timeline *timeline;
struct intel_signal_node signaling; struct intel_signal_node signaling;
struct i915_sw_fence submit; struct i915_sw_fence submit;
wait_queue_t submitq; wait_queue_t submitq;
u32 global_seqno;
/** GEM sequence number associated with the previous request, /** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing * when the HWS breadcrumb is equal to this the GPU is processing
* this request. * this request.
...@@ -147,7 +150,7 @@ struct drm_i915_gem_request { ...@@ -147,7 +150,7 @@ struct drm_i915_gem_request {
extern const struct dma_fence_ops i915_fence_ops; extern const struct dma_fence_ops i915_fence_ops;
static inline bool fence_is_i915(struct dma_fence *fence) static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{ {
return fence->ops == &i915_fence_ops; return fence->ops == &i915_fence_ops;
} }
...@@ -162,7 +165,7 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req); ...@@ -162,7 +165,7 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
static inline u32 static inline u32
i915_gem_request_get_seqno(struct drm_i915_gem_request *req) i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{ {
return req ? req->fence.seqno : 0; return req ? req->global_seqno : 0;
} }
static inline struct intel_engine_cs * static inline struct intel_engine_cs *
...@@ -176,7 +179,7 @@ to_request(struct dma_fence *fence) ...@@ -176,7 +179,7 @@ to_request(struct dma_fence *fence)
{ {
/* We assume that NULL fence/request are interoperable */ /* We assume that NULL fence/request are interoperable */
BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0); BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
GEM_BUG_ON(fence && !fence_is_i915(fence)); GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
return container_of(fence, struct drm_i915_gem_request, fence); return container_of(fence, struct drm_i915_gem_request, fence);
} }
...@@ -214,6 +217,8 @@ int ...@@ -214,6 +217,8 @@ int
i915_gem_request_await_object(struct drm_i915_gem_request *to, i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj, struct drm_i915_gem_object *obj,
bool write); bool write);
int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
struct dma_fence *fence);
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches); void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \ #define i915_add_request(req) \
...@@ -226,13 +231,13 @@ struct intel_rps_client; ...@@ -226,13 +231,13 @@ struct intel_rps_client;
#define IS_RPS_CLIENT(p) (!IS_ERR(p)) #define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p)) #define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
int i915_wait_request(struct drm_i915_gem_request *req, long i915_wait_request(struct drm_i915_gem_request *req,
unsigned int flags, unsigned int flags,
s64 *timeout, long timeout)
struct intel_rps_client *rps)
__attribute__((nonnull(1))); __attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0) #define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ #define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine); static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
...@@ -245,17 +250,37 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2) ...@@ -245,17 +250,37 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
} }
static inline bool static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req) __i915_gem_request_started(const struct drm_i915_gem_request *req)
{ {
GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine), return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->previous_seqno); req->previous_seqno);
} }
static inline bool static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req) i915_gem_request_started(const struct drm_i915_gem_request *req)
{
if (!req->global_seqno)
return false;
return __i915_gem_request_started(req);
}
static inline bool
__i915_gem_request_completed(const struct drm_i915_gem_request *req)
{ {
GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine), return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->fence.seqno); req->global_seqno);
}
static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
if (!req->global_seqno)
return false;
return __i915_gem_request_completed(req);
} }
bool __i915_spin_request(const struct drm_i915_gem_request *request, bool __i915_spin_request(const struct drm_i915_gem_request *request,
...@@ -263,7 +288,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *request, ...@@ -263,7 +288,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *request,
static inline bool i915_spin_request(const struct drm_i915_gem_request *request, static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us) int state, unsigned long timeout_us)
{ {
return (i915_gem_request_started(request) && return (__i915_gem_request_started(request) &&
__i915_spin_request(request, state, timeout_us)); __i915_spin_request(request, state, timeout_us));
} }
...@@ -551,54 +576,14 @@ i915_gem_active_isset(const struct i915_gem_active *active) ...@@ -551,54 +576,14 @@ i915_gem_active_isset(const struct i915_gem_active *active)
return rcu_access_pointer(active->request); return rcu_access_pointer(active->request);
} }
/**
* i915_gem_active_is_idle - report whether the active tracker is idle
* @active - the active tracker
*
* i915_gem_active_is_idle() returns true if the active tracker is currently
* unassigned or if the request is complete (but not yet retired). Requires
* the caller to hold struct_mutex (but that can be relaxed if desired).
*/
static inline bool
i915_gem_active_is_idle(const struct i915_gem_active *active,
struct mutex *mutex)
{
return !i915_gem_active_peek(active, mutex);
}
/** /**
* i915_gem_active_wait - waits until the request is completed * i915_gem_active_wait - waits until the request is completed
* @active - the active request on which to wait * @active - the active request on which to wait
*
* i915_gem_active_wait() waits until the request is completed before
* returning. Note that it does not guarantee that the request is
* retired first, see i915_gem_active_retire().
*
* i915_gem_active_wait() returns immediately if the active
* request is already complete.
*/
static inline int __must_check
i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
{
struct drm_i915_gem_request *request;
request = i915_gem_active_peek(active, mutex);
if (!request)
return 0;
return i915_wait_request(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
NULL, NULL);
}
/**
* i915_gem_active_wait_unlocked - waits until the request is completed
* @active - the active request on which to wait
* @flags - how to wait * @flags - how to wait
* @timeout - how long to wait at most * @timeout - how long to wait at most
* @rps - userspace client to charge for a waitboost * @rps - userspace client to charge for a waitboost
* *
* i915_gem_active_wait_unlocked() waits until the request is completed before * i915_gem_active_wait() waits until the request is completed before
* returning, without requiring any locks to be held. Note that it does not * returning, without requiring any locks to be held. Note that it does not
* retire any requests before returning. * retire any requests before returning.
* *
...@@ -614,21 +599,18 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex) ...@@ -614,21 +599,18 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
* Returns 0 if successful, or a negative error code. * Returns 0 if successful, or a negative error code.
*/ */
static inline int static inline int
i915_gem_active_wait_unlocked(const struct i915_gem_active *active, i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
unsigned int flags,
s64 *timeout,
struct intel_rps_client *rps)
{ {
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
int ret = 0; long ret = 0;
request = i915_gem_active_get_unlocked(active); request = i915_gem_active_get_unlocked(active);
if (request) { if (request) {
ret = i915_wait_request(request, flags, timeout, rps); ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
i915_gem_request_put(request); i915_gem_request_put(request);
} }
return ret; return ret < 0 ? ret : 0;
} }
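Underlying this helper, i915_wait_request() now returns a long — the remaining jiffies on success or a negative error — instead of the old 0/-errno with a separate s64 timeout pointer. The new calling convention is therefore (sketch):

	long ret;

	ret = i915_wait_request(request, I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;		/* interrupted, or the wait otherwise failed */
	/* ret >= 0: the request completed, ret is the timeout left over */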
/** /**
...@@ -645,7 +627,7 @@ i915_gem_active_retire(struct i915_gem_active *active, ...@@ -645,7 +627,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
struct mutex *mutex) struct mutex *mutex)
{ {
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
int ret; long ret;
request = i915_gem_active_raw(active, mutex); request = i915_gem_active_raw(active, mutex);
if (!request) if (!request)
...@@ -653,8 +635,8 @@ i915_gem_active_retire(struct i915_gem_active *active, ...@@ -653,8 +635,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
ret = i915_wait_request(request, ret = i915_wait_request(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
NULL, NULL); MAX_SCHEDULE_TIMEOUT);
if (ret) if (ret < 0)
return ret; return ret;
list_del_init(&active->link); list_del_init(&active->link);
...@@ -665,24 +647,6 @@ i915_gem_active_retire(struct i915_gem_active *active, ...@@ -665,24 +647,6 @@ i915_gem_active_retire(struct i915_gem_active *active,
return 0; return 0;
} }
/* Convenience functions for peeking at state inside active's request whilst
* guarded by the struct_mutex.
*/
static inline uint32_t
i915_gem_active_get_seqno(const struct i915_gem_active *active,
struct mutex *mutex)
{
return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
}
static inline struct intel_engine_cs *
i915_gem_active_get_engine(const struct i915_gem_active *active,
struct mutex *mutex)
{
return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
}
#define for_each_active(mask, idx) \ #define for_each_active(mask, idx) \
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx)) for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
......
...@@ -48,6 +48,20 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) ...@@ -48,6 +48,20 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif #endif
} }
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return false;
*unlock = false;
} else {
*unlock = true;
}
return true;
}
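Hoisting this helper above i915_gem_shrink() lets the shrink path itself take struct_mutex, while still recognising recursion from allocations we made with the mutex already held. The resulting caller pattern, as used further down in this file, is:

	bool unlock;

	if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
		return 0;	/* held by another task: nothing we can reclaim safely */

	/* ... unbind objects and drop their pages ... */

	if (unlock)
		mutex_unlock(&dev_priv->drm.struct_mutex);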
static bool any_vma_pinned(struct drm_i915_gem_object *obj) static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{ {
struct i915_vma *vma; struct i915_vma *vma;
...@@ -66,8 +80,11 @@ static bool swap_available(void) ...@@ -66,8 +80,11 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj) static bool can_release_pages(struct drm_i915_gem_object *obj)
{ {
/* Only shmemfs objects are backed by swap */ if (!obj->mm.pages)
if (!obj->base.filp) return false;
/* Consider only shrinkable objects. */
if (!i915_gem_object_is_shrinkable(obj))
return false; return false;
/* Only report true if by unbinding the object and putting its pages /* Only report true if by unbinding the object and putting its pages
...@@ -78,7 +95,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) ...@@ -78,7 +95,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed * to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves. * in releasing our pin count on the pages themselves.
*/ */
if (obj->pages_pin_count > obj->bind_count) if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false; return false;
if (any_vma_pinned(obj)) if (any_vma_pinned(obj))
...@@ -88,7 +105,14 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) ...@@ -88,7 +105,14 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* discard the contents (because the user has marked them as being * discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap. * purgeable) or if we can move their contents out to swap.
*/ */
return swap_available() || obj->madv == I915_MADV_DONTNEED; return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
return !READ_ONCE(obj->mm.pages);
} }
/** /**
...@@ -128,6 +152,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -128,6 +152,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
{ NULL, 0 }, { NULL, 0 },
}, *phase; }, *phase;
unsigned long count = 0; unsigned long count = 0;
bool unlock;
if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
return 0;
trace_i915_gem_shrink(dev_priv, target, flags); trace_i915_gem_shrink(dev_priv, target, flags);
i915_gem_retire_requests(dev_priv); i915_gem_retire_requests(dev_priv);
...@@ -171,15 +199,19 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -171,15 +199,19 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
while (count < target && while (count < target &&
(obj = list_first_entry_or_null(phase->list, (obj = list_first_entry_or_null(phase->list,
typeof(*obj), typeof(*obj),
global_list))) { global_link))) {
list_move_tail(&obj->global_list, &still_in_list); list_move_tail(&obj->global_link, &still_in_list);
if (!obj->mm.pages) {
list_del_init(&obj->global_link);
continue;
}
if (flags & I915_SHRINK_PURGEABLE && if (flags & I915_SHRINK_PURGEABLE &&
obj->madv != I915_MADV_DONTNEED) obj->mm.madv != I915_MADV_DONTNEED)
continue; continue;
if (flags & I915_SHRINK_VMAPS && if (flags & I915_SHRINK_VMAPS &&
!is_vmalloc_addr(obj->mapping)) !is_vmalloc_addr(obj->mm.mapping))
continue; continue;
if (!(flags & I915_SHRINK_ACTIVE) && if (!(flags & I915_SHRINK_ACTIVE) &&
...@@ -190,22 +222,28 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -190,22 +222,28 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!can_release_pages(obj)) if (!can_release_pages(obj))
continue; continue;
i915_gem_object_get(obj); if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */
/* For the unbound phase, this should be a no-op! */ mutex_lock_nested(&obj->mm.lock,
i915_gem_object_unbind(obj); I915_MM_SHRINKER);
if (i915_gem_object_put_pages(obj) == 0) if (!obj->mm.pages) {
count += obj->base.size >> PAGE_SHIFT; __i915_gem_object_invalidate(obj);
list_del_init(&obj->global_link);
i915_gem_object_put(obj); count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
}
} }
list_splice(&still_in_list, phase->list); list_splice_tail(&still_in_list, phase->list);
} }
if (flags & I915_SHRINK_BOUND) if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv); i915_gem_retire_requests(dev_priv);
if (unlock)
mutex_unlock(&dev_priv->drm.struct_mutex);
/* expedite the RCU grace period to free some request slabs */ /* expedite the RCU grace period to free some request slabs */
synchronize_rcu_expedited(); synchronize_rcu_expedited();
...@@ -239,19 +277,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) ...@@ -239,19 +277,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
return freed; return freed;
} }
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return false;
*unlock = false;
} else
*unlock = true;
return true;
}
static unsigned long static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{ {
...@@ -268,11 +293,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) ...@@ -268,11 +293,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
i915_gem_retire_requests(dev_priv); i915_gem_retire_requests(dev_priv);
count = 0; count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
if (can_release_pages(obj)) if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT; count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT; count += obj->base.size >> PAGE_SHIFT;
} }
...@@ -373,13 +398,19 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) ...@@ -373,13 +398,19 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* being pointed to by hardware. * being pointed to by hardware.
*/ */
unbound = bound = unevictable = 0; unbound = bound = unevictable = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (!obj->mm.pages)
continue;
if (!can_release_pages(obj)) if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT; unevictable += obj->base.size >> PAGE_SHIFT;
else else
unbound += obj->base.size >> PAGE_SHIFT; unbound += obj->base.size >> PAGE_SHIFT;
} }
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!obj->mm.pages)
continue;
if (!can_release_pages(obj)) if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT; unevictable += obj->base.size >> PAGE_SHIFT;
else else
......
...@@ -109,7 +109,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -109,7 +109,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* *
*/ */
base = 0; base = 0;
if (INTEL_INFO(dev)->gen >= 3) { if (INTEL_GEN(dev_priv) >= 3) {
u32 bsm; u32 bsm;
pci_read_config_dword(pdev, INTEL_BSM, &bsm); pci_read_config_dword(pdev, INTEL_BSM, &bsm);
...@@ -138,7 +138,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -138,7 +138,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I865_TOUD, &toud); I865_TOUD, &toud);
base = (toud << 16) + tseg_size; base = (toud << 16) + tseg_size;
} else if (IS_I85X(dev)) { } else if (IS_I85X(dev_priv)) {
u32 tseg_size = 0; u32 tseg_size = 0;
u32 tom; u32 tom;
u8 tmp; u8 tmp;
...@@ -546,25 +546,29 @@ i915_pages_create_for_stolen(struct drm_device *dev, ...@@ -546,25 +546,29 @@ i915_pages_create_for_stolen(struct drm_device *dev,
return st; return st;
} }
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) static struct sg_table *
i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{ {
BUG(); return i915_pages_create_for_stolen(obj->base.dev,
return -EINVAL; obj->stolen->start,
obj->stolen->size);
} }
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
/* Should only be called during free */ /* Should only be called during free */
sg_free_table(obj->pages); sg_free_table(pages);
kfree(obj->pages); kfree(pages);
} }
static void static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
__i915_gem_object_unpin_pages(obj);
if (obj->stolen) { if (obj->stolen) {
i915_gem_stolen_remove_node(dev_priv, obj->stolen); i915_gem_stolen_remove_node(dev_priv, obj->stolen);
kfree(obj->stolen); kfree(obj->stolen);
...@@ -590,20 +594,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev, ...@@ -590,20 +594,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, stolen->size); drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops); i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->pages = i915_pages_create_for_stolen(dev,
stolen->start, stolen->size);
if (obj->pages == NULL)
goto cleanup;
obj->get_page.sg = obj->pages->sgl;
obj->get_page.last = 0;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen; obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE; obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
if (i915_gem_object_pin_pages(obj))
goto cleanup;
return obj; return obj;
cleanup: cleanup:
...@@ -698,10 +695,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ...@@ -698,10 +695,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE) if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj; return obj;
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL); vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto err; goto err_pages;
} }
/* To simplify the initialisation sequence between KMS and GTT, /* To simplify the initialisation sequence between KMS and GTT,
...@@ -715,20 +716,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ...@@ -715,20 +716,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node); ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) { if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err; goto err_pages;
} }
vma->pages = obj->pages; vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND; vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma); __i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list); list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
obj->bind_count++; obj->bind_count++;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
i915_gem_object_pin_pages(obj);
return obj; return obj;
err_pages:
i915_gem_object_unpin_pages(obj);
err: err:
i915_gem_object_put(obj); i915_gem_object_put(obj);
return NULL; return NULL;
......
...@@ -201,12 +201,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -201,12 +201,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!i915_tiling_ok(dev, if (!i915_tiling_ok(dev,
args->stride, obj->base.size, args->tiling_mode)) { args->stride, obj->base.size, args->tiling_mode)) {
i915_gem_object_put_unlocked(obj); i915_gem_object_put(obj);
return -EINVAL; return -EINVAL;
} }
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) { if (obj->pin_display || obj->framebuffer_references) {
err = -EBUSY; err = -EBUSY;
...@@ -261,14 +259,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -261,14 +259,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!err) { if (!err) {
struct i915_vma *vma; struct i915_vma *vma;
if (obj->pages && mutex_lock(&obj->mm.lock);
obj->madv == I915_MADV_WILLNEED && if (obj->mm.pages &&
obj->mm.madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (args->tiling_mode == I915_TILING_NONE) if (args->tiling_mode == I915_TILING_NONE) {
i915_gem_object_unpin_pages(obj); GEM_BUG_ON(!obj->mm.quirked);
if (!i915_gem_object_is_tiled(obj)) __i915_gem_object_unpin_pages(obj);
i915_gem_object_pin_pages(obj); obj->mm.quirked = false;
}
if (!i915_gem_object_is_tiled(obj)) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
} }
mutex_unlock(&obj->mm.lock);
list_for_each_entry(vma, &obj->vma_list, obj_link) { list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!vma->fence) if (!vma->fence)
...@@ -302,8 +308,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -302,8 +308,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
i915_gem_object_put(obj); i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
intel_runtime_pm_put(dev_priv);
return err; return err;
} }
...@@ -327,12 +331,19 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, ...@@ -327,12 +331,19 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_get_tiling *args = data; struct drm_i915_gem_get_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
int err = -ENOENT;
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, args->handle);
if (obj) {
args->tiling_mode =
READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
err = 0;
}
rcu_read_unlock();
if (unlikely(err))
return err;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
switch (args->tiling_mode) { switch (args->tiling_mode) {
case I915_TILING_X: case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
...@@ -340,11 +351,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, ...@@ -340,11 +351,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
case I915_TILING_Y: case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break; break;
default:
case I915_TILING_NONE: case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break; break;
default:
DRM_ERROR("unknown tiling mode\n");
} }
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
...@@ -357,6 +367,5 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, ...@@ -357,6 +367,5 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
i915_gem_object_put_unlocked(obj);
return 0; return 0;
} }
 /*
- * Copyright 2016 Intel Corporation
+ * Copyright © 2016 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
...
@@ -17,29 +17,49 @@
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
  *
  */
 
-#ifndef _I915_GEM_DMABUF_H_
-#define _I915_GEM_DMABUF_H_
-
-#include <linux/dma-buf.h>
-
-static inline struct reservation_object *
-i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
-{
-	struct dma_buf *dma_buf;
-
-	if (obj->base.dma_buf)
-		dma_buf = obj->base.dma_buf;
-	else if (obj->base.import_attach)
-		dma_buf = obj->base.import_attach->dmabuf;
-	else
-		return NULL;
-
-	return dma_buf->resv;
-}
-
-#endif
+#include "i915_drv.h"
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+			   struct i915_gem_timeline *timeline,
+			   const char *name)
+{
+	unsigned int i;
+	u64 fences;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	timeline->i915 = i915;
+	timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
+	if (!timeline->name)
+		return -ENOMEM;
+
+	list_add(&timeline->link, &i915->gt.timelines);
+
+	/* Called during early_init before we know how many engines there are */
+	fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
+	for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+		struct intel_timeline *tl = &timeline->engine[i];
+
+		tl->fence_context = fences++;
+		tl->common = timeline;
+
+		spin_lock_init(&tl->lock);
+		init_request_active(&tl->last_request, NULL);
+		INIT_LIST_HEAD(&tl->requests);
+	}
+
+	return 0;
+}
+
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+{
+	lockdep_assert_held(&tl->i915->drm.struct_mutex);
+
+	list_del(&tl->link);
+	kfree(tl->name);
+}
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef I915_GEM_TIMELINE_H
#define I915_GEM_TIMELINE_H

#include <linux/list.h>

#include "i915_gem_request.h"

struct i915_gem_timeline;

struct intel_timeline {
	u64 fence_context;
	u32 last_submitted_seqno;

	spinlock_t lock;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head requests;

	/* Contains an RCU guarded pointer to the last request. No reference is
	 * held to the request, users must carefully acquire a reference to
	 * the request using i915_gem_active_get_request_rcu(), or hold the
	 * struct_mutex.
	 */
	struct i915_gem_active last_request;
	u32 sync_seqno[I915_NUM_ENGINES];

	struct i915_gem_timeline *common;
};

struct i915_gem_timeline {
	struct list_head link;
	atomic_t next_seqno;

	struct drm_i915_private *i915;
	const char *name;

	struct intel_timeline engine[I915_NUM_ENGINES];
};

int i915_gem_timeline_init(struct drm_i915_private *i915,
			   struct i915_gem_timeline *tl,
			   const char *name);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);

#endif
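The header above only declares the init/fini pair; the actual call sites are not part of the hunks shown here. As a hedged illustration of the locking contract asserted by the lockdep checks (struct_mutex held across both calls), a hypothetical caller might look like the sketch below; the function and variable names are placeholders, not code from this diff.

/* Sketch only: create and tear down a timeline while holding struct_mutex,
 * matching the lockdep_assert_held() checks in i915_gem_timeline_init/fini.
 */
static int example_timeline_setup(struct drm_i915_private *dev_priv,
				  struct i915_gem_timeline *timeline)
{
	int err;

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = i915_gem_timeline_init(dev_priv, timeline, "[example]");
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return err;
}

static void example_timeline_teardown(struct drm_i915_private *dev_priv,
				      struct i915_gem_timeline *timeline)
{
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_timeline_fini(timeline);
	mutex_unlock(&dev_priv->drm.struct_mutex);
}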
...
@@ -288,7 +288,8 @@ static const struct intel_device_info intel_haswell_info = {
 #define BDW_FEATURES \
 	HSW_FEATURES, \
 	BDW_COLORS, \
-	.has_logical_ring_contexts = 1
+	.has_logical_ring_contexts = 1, \
+	.has_64bit_reloc = 1
 
 static const struct intel_device_info intel_broadwell_info = {
 	BDW_FEATURES,
...
@@ -308,6 +309,7 @@ static const struct intel_device_info intel_cherryview_info = {
 	.has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.is_cherryview = 1,
+	.has_64bit_reloc = 1,
 	.has_psr = 1,
 	.has_runtime_pm = 1,
 	.has_resource_streamer = 1,
...
@@ -347,6 +349,7 @@ static const struct intel_device_info intel_broxton_info = {
 	.has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.num_pipes = 3,
+	.has_64bit_reloc = 1,
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.has_fbc = 1,
...
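The new .has_64bit_reloc device-info flag marks the platforms on which execbuf relocation entries are written as 64-bit GPU addresses. The feature-check macro that consumes it is not part of the hunks above, so the following is only a sketch under the assumption that it follows the driver's usual HAS_*() convention; both the macro spelling and the call site are assumptions.

/* Assumed wrapper in the style of the driver's other HAS_*() macros. */
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)

/* Hypothetical use in a relocation writer: pick the entry width from
 * the device capability rather than hard-coding a gen check.
 */
static unsigned int reloc_entry_size(const struct drm_i915_private *dev_priv)
{
	return HAS_64BIT_RELOC(dev_priv) ? sizeof(u64) : sizeof(u32);
}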
...
@@ -46,6 +46,9 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);
 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 				 struct i915_sw_fence *after,
 				 wait_queue_t *wq);
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+				     struct i915_sw_fence *after,
+				     gfp_t gfp);
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 				  struct dma_fence *dma,
 				  unsigned long timeout,
...
@@ -62,4 +65,9 @@ static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
 	return atomic_read(&fence->pending) < 0;
 }
 
+static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
+{
+	wait_event(fence->wait, i915_sw_fence_done(fence));
+}
+
 #endif /* _I915_SW_FENCE_H_ */
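To make the intent of the two additions concrete, here is a hedged sketch of how a caller might combine them. Only the declarations visible in the header above are used; the scenario, the function name and the error handling are illustrative, and both fences are assumed to have been initialised elsewhere (the init helpers are outside these hunks).

/* Sketch only: make "child" depend on "parent", then block until the
 * chain completes (i.e. once "parent" has been committed and signalled
 * elsewhere).
 */
static int example_chain_and_wait(struct i915_sw_fence *parent,
				  struct i915_sw_fence *child)
{
	int err;

	/* The _gfp variant may allocate its own wait-queue entry. */
	err = i915_sw_fence_await_sw_fence_gfp(child, parent, GFP_KERNEL);
	if (err < 0)
		return err;

	/* No further dependencies may be added after committing. */
	i915_sw_fence_commit(child);

	/* New helper from this series: sleep until the fence is done. */
	i915_sw_fence_wait(child);
	return 0;
}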
...
@@ -84,7 +84,6 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 	state = &intel_state->base;
 
 	__drm_atomic_helper_plane_duplicate_state(plane, state);
-	intel_state->wait_req = NULL;
 
 	return state;
 }
...
@@ -101,7 +100,6 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
 			  struct drm_plane_state *state)
 {
-	WARN_ON(state && to_intel_plane_state(state)->wait_req);
 	drm_atomic_helper_plane_destroy_state(plane, state);
 }
...
...
@@ -573,7 +573,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
 	POSTING_READ(pipeconf_reg);
 	/* Wait for next Vblank to substitue
 	 * border color for Color info */
-	intel_wait_for_vblank(dev, pipe);
+	intel_wait_for_vblank(dev_priv, pipe);
 	st00 = I915_READ8(_VGA_MSR_WRITE);
 	status = ((st00 & (1 << 4)) != 0) ?
 		connector_status_connected :
...