Commit 73ebd503 authored by Matthew Auld, committed by Joonas Lahtinen

drm/i915: make mappable struct resource centric

Now that we are using struct resource to track the stolen region, it is
more convenient if we track the mappable region in a resource as well.

v2: prefer iomap and gmadr naming scheme
    prefer DEFINE_RES_MEM
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171211151822.20953-8-matthew.auld@intel.com
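
As an illustrative aside (not part of the commit): DEFINE_RES_MEM() builds a struct resource from a start address and a size, after which generic helpers such as resource_size() replace open-coded base/size arithmetic. A minimal sketch, with a made-up base address and size:

    #include <linux/ioport.h>

    /* Hypothetical values, for illustration only. */
    static struct resource gmadr =
            DEFINE_RES_MEM(0xe0000000, 256 * 1024 * 1024);

    /* resource_size() computes end - start + 1, i.e. the 256M size here. */
    resource_size_t aperture_size = resource_size(&gmadr);

    /* What the patch reads as ggtt->gmadr.start replaces mappable_base. */
    resource_size_t aperture_base = gmadr.start;

The (struct resource) cast seen throughout the diff is needed because DEFINE_RES_MEM() expands to a brace-enclosed initialiser, which can only be assigned to an already-declared variable as a compound literal.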
parent 17a05345
@@ -348,7 +348,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 
 /* Aperture/GM space definitions for GVT device */
 #define gvt_aperture_sz(gvt)      (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
 #define gvt_ggtt_gm_sz(gvt)       (gvt->dev_priv->ggtt.base.total)
 #define gvt_ggtt_sz(gvt) \
@@ -726,7 +726,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
        if (!ap)
                return -ENOMEM;
 
-       ap->ranges[0].base = ggtt->mappable_base;
+       ap->ranges[0].base = ggtt->gmadr.start;
        ap->ranges[0].size = ggtt->mappable_end;
 
        primary =
@@ -1116,7 +1116,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                        page_base += offset & PAGE_MASK;
                }
 
-               if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+               if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
@@ -1324,7 +1324,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
-               if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+               if (ggtt_write(&ggtt->iomap, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
@@ -1967,9 +1967,9 @@ int i915_gem_fault(struct vm_fault *vmf)
 
        /* Finally, remap it using the new GTT offset */
        ret = remap_io_mapping(area,
                               area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
-                              (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+                              (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
                               min_t(u64, vma->size, area->vm_end - area->vm_start),
-                              &ggtt->mappable);
+                              &ggtt->iomap);
        if (ret)
                goto err_fence;
@@ -1012,7 +1012,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                offset += page << PAGE_SHIFT;
        }
 
-       vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
+       vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
                                                         offset);
        cache->page = page;
        cache->vaddr = (unsigned long)vaddr;
@@ -2912,7 +2912,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        arch_phys_wc_del(ggtt->mtrr);
-       io_mapping_fini(&ggtt->mappable);
+       io_mapping_fini(&ggtt->iomap);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3288,8 +3288,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        int err;
 
        /* TODO: We're not aware of mappable constraints on gen8 yet */
-       ggtt->mappable_base = pci_resource_start(pdev, 2);
-       ggtt->mappable_end = pci_resource_len(pdev, 2);
+       ggtt->gmadr =
+               (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+                                                pci_resource_len(pdev, 2));
+       ggtt->mappable_end = resource_size(&ggtt->gmadr);
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
        if (!err)
@@ -3343,8 +3345,10 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
        u16 snb_gmch_ctl;
        int err;
 
-       ggtt->mappable_base = pci_resource_start(pdev, 2);
-       ggtt->mappable_end = pci_resource_len(pdev, 2);
+       ggtt->gmadr =
+               (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+                                                pci_resource_len(pdev, 2));
+       ggtt->mappable_end = resource_size(&ggtt->gmadr);
 
        /* 64/512MB is the current min/max we actually know of, but this is just
         * a coarse sanity check.
@@ -3397,6 +3401,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
        struct drm_i915_private *dev_priv = ggtt->base.i915;
+       phys_addr_t gmadr_base;
        int ret;
 
        ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3406,9 +3411,13 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        }
 
        intel_gtt_get(&ggtt->base.total,
-                     &ggtt->mappable_base,
+                     &gmadr_base,
                      &ggtt->mappable_end);
 
+       ggtt->gmadr =
+               (struct resource) DEFINE_RES_MEM(gmadr_base,
+                                                ggtt->mappable_end);
+
        ggtt->do_idle_maps = needs_idle_maps(dev_priv);
        ggtt->base.insert_page = i915_ggtt_insert_page;
        ggtt->base.insert_entries = i915_ggtt_insert_entries;
@@ -3476,7 +3485,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %lluM\n",
                 ggtt->base.total >> 20);
-       DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+       DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %lluM\n",
                         (u64)resource_size(&intel_graphics_stolen_res) >> 20);
 
        if (intel_vtd_active())
@@ -3507,14 +3516,14 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
                ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
-                               dev_priv->ggtt.mappable_base,
+       if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
+                               dev_priv->ggtt.gmadr.start,
                                dev_priv->ggtt.mappable_end)) {
                ret = -EIO;
                goto out_gtt_cleanup;
        }
 
-       ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
+       ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
 
        /*
         * Initialise stolen early so that we may reserve preallocated
@@ -368,9 +368,9 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
  */
 struct i915_ggtt {
        struct i915_address_space base;
-       struct io_mapping mappable;     /* Mapping to our CPU mappable region */
-       phys_addr_t mappable_base;      /* PA of our GMADR */
+       struct io_mapping iomap;        /* Mapping to our CPU mappable region */
+       struct resource gmadr;          /* GMADR resource */
        u64 mappable_end;               /* End offset that we can CPU map */
 
        /* Stolen memory is segmented in hardware with different portions
@@ -956,7 +956,7 @@ i915_error_object_create(struct drm_i915_private *i915,
                ggtt->base.insert_page(&ggtt->base, dma, slot,
                                       I915_CACHE_NONE, 0);
 
-               s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+               s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
                ret = compress_page(&compress, (void __force *)s, dst);
                io_mapping_unmap_atomic(s);
@@ -311,7 +311,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 
        ptr = vma->iomap;
        if (ptr == NULL) {
-               ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+               ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL) {
@@ -14595,7 +14595,7 @@ int intel_modeset_init(struct drm_device *dev)
                dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
        }
 
-       dev->mode_config.fb_base = ggtt->mappable_base;
+       dev->mode_config.fb_base = ggtt->gmadr.start;
 
        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_INFO(dev_priv)->num_pipes,
@@ -219,7 +219,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
        if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
+               regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
                                         overlay->flip_addr,
                                         PAGE_SIZE);
 
@@ -1508,7 +1508,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
                regs = (struct overlay_registers __iomem *)
                        overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
+               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
                                                overlay->flip_addr);
 
        return regs;
@@ -1074,7 +1074,7 @@ static int igt_ggtt_page(void *arg)
                                       i915_gem_object_get_dma_address(obj, 0),
                                       offset, I915_CACHE_NONE, 0);
 
-               vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+               vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
 
@@ -1092,7 +1092,7 @@ static int igt_ggtt_page(void *arg)
                                       i915_gem_object_get_dma_address(obj, 0),
                                       offset, I915_CACHE_NONE, 0);
 
-               vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+               vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);
@@ -110,8 +110,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
 
        ggtt->base.i915 = i915;
 
-       ggtt->mappable_base = 0;
-       ggtt->mappable_end = 2048 * PAGE_SIZE;
+       ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
+       ggtt->mappable_end = resource_size(&ggtt->gmadr);
        ggtt->base.total = 4096 * PAGE_SIZE;
 
        ggtt->base.clear_range = nop_clear_range;