Commit 3c0fa9f4 authored by Ville Syrjälä

drm/i915: Use struct resource for memory region IO as well

mem->region is a struct resource, but mem->io_start and
mem->io_size are not for whatever reason. Let's unify this
and convert the io stuff into a struct resource as well.
Should make life a little less annoying when you don't have
to juggle between two different approaches all the time.

Mostly done using cocci (with manual tweaks at all the
places where we mutate io_size by hand):
@@
struct intel_memory_region *M;
expression START, SIZE;
@@
- M->io_start = START;
- M->io_size = SIZE;
+ M->io = DEFINE_RES_MEM(START, SIZE);

@@
struct intel_memory_region *M;
@@
- M->io_start
+ M->io.start

@@
struct intel_memory_region M;
@@
- M.io_start
+ M.io.start

@@
expression M;
@@
- M->io_size
+ resource_size(&M->io)

@@
expression M;
@@
- M.io_size
+ resource_size(&M.io)
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Acked-by: Nirmoy Das <nirmoy.das@intel.com>
Tested-by: Paz Zcharya <pazz@chromium.org>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240202224340.30647-2-ville.syrjala@linux.intel.com
parent 2a2e2f5f
...@@ -78,7 +78,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info ...@@ -78,7 +78,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
/* Use fbdev's framebuffer from lmem for discrete */ /* Use fbdev's framebuffer from lmem for discrete */
info->fix.smem_start = info->fix.smem_start =
(unsigned long)(mem->io_start + (unsigned long)(mem->io.start +
i915_gem_object_get_dma_address(obj, 0)); i915_gem_object_get_dma_address(obj, 0));
info->fix.smem_len = obj->base.size; info->fix.smem_len = obj->base.size;
} else { } else {
......
...@@ -129,7 +129,7 @@ i915_gem_object_create_region_at(struct intel_memory_region *mem, ...@@ -129,7 +129,7 @@ i915_gem_object_create_region_at(struct intel_memory_region *mem,
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
if (!(flags & I915_BO_ALLOC_GPU_ONLY) && if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
offset + size > mem->io_size && offset + size > resource_size(&mem->io) &&
!i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt)) !i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
return ERR_PTR(-ENOSPC); return ERR_PTR(-ENOSPC);
......
...@@ -541,7 +541,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) ...@@ -541,7 +541,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
/* Exclude the reserved region from driver use */ /* Exclude the reserved region from driver use */
mem->region.end = i915->dsm.reserved.start - 1; mem->region.end = i915->dsm.reserved.start - 1;
mem->io_size = min(mem->io_size, resource_size(&mem->region)); mem->io = DEFINE_RES_MEM(mem->io.start,
min(resource_size(&mem->io),
resource_size(&mem->region)));
i915->dsm.usable_size = resource_size(&mem->region); i915->dsm.usable_size = resource_size(&mem->region);
...@@ -752,7 +754,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, ...@@ -752,7 +754,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
* With discrete devices, where we lack a mappable aperture there is no * With discrete devices, where we lack a mappable aperture there is no
* possible way to ever access this memory on the CPU side. * possible way to ever access this memory on the CPU side.
*/ */
if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size && if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
!(flags & I915_BO_ALLOC_GPU_ONLY)) !(flags & I915_BO_ALLOC_GPU_ONLY))
return -ENOSPC; return -ENOSPC;
...@@ -838,13 +840,12 @@ static int init_stolen_lmem(struct intel_memory_region *mem) ...@@ -838,13 +840,12 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
return 0; return 0;
} }
if (mem->io_size && if (resource_size(&mem->io) &&
!io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size)) !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io)))
goto err_cleanup; goto err_cleanup;
drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n", drm_dbg(&i915->drm, "Stolen Local DSM: %pR\n", &mem->region);
&mem->io_start); drm_dbg(&i915->drm, "Stolen Local memory IO: %pR\n", &mem->io);
drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);
return 0; return 0;
...@@ -855,7 +856,7 @@ static int init_stolen_lmem(struct intel_memory_region *mem) ...@@ -855,7 +856,7 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
static int release_stolen_lmem(struct intel_memory_region *mem) static int release_stolen_lmem(struct intel_memory_region *mem)
{ {
if (mem->io_size) if (resource_size(&mem->io))
io_mapping_fini(&mem->iomap); io_mapping_fini(&mem->iomap);
i915_gem_cleanup_stolen(mem->i915); i915_gem_cleanup_stolen(mem->i915);
return 0; return 0;
......
...@@ -144,13 +144,13 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr, ...@@ -144,13 +144,13 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
place->fpfn = offset >> PAGE_SHIFT; place->fpfn = offset >> PAGE_SHIFT;
WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn)); WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
place->lpfn = place->fpfn + (size >> PAGE_SHIFT); place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
} else if (mr->io_size && mr->io_size < mr->total) { } else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) { if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN; place->flags |= TTM_PL_FLAG_TOPDOWN;
} else { } else {
place->fpfn = 0; place->fpfn = 0;
WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn)); WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
place->lpfn = mr->io_size >> PAGE_SHIFT; place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
} }
} }
} }
...@@ -1090,7 +1090,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) ...@@ -1090,7 +1090,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct intel_memory_region *mr = obj->mm.placements[i]; struct intel_memory_region *mr = obj->mm.placements[i];
unsigned int flags; unsigned int flags;
if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM) if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
continue; continue;
flags = obj->flags; flags = obj->flags;
......
...@@ -1054,7 +1054,7 @@ static int igt_fill_mappable(struct intel_memory_region *mr, ...@@ -1054,7 +1054,7 @@ static int igt_fill_mappable(struct intel_memory_region *mr,
int err; int err;
total = 0; total = 0;
size = mr->io_size; size = resource_size(&mr->io);
do { do {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
...@@ -1315,28 +1315,28 @@ static int igt_mmap_migrate(void *arg) ...@@ -1315,28 +1315,28 @@ static int igt_mmap_migrate(void *arg)
struct intel_memory_region *mixed[] = { mr, system }; struct intel_memory_region *mixed[] = { mr, system };
struct intel_memory_region *single[] = { mr }; struct intel_memory_region *single[] = { mr };
struct ttm_resource_manager *man = mr->region_private; struct ttm_resource_manager *man = mr->region_private;
resource_size_t saved_io_size; struct resource saved_io;
int err; int err;
if (mr->private) if (mr->private)
continue; continue;
if (!mr->io_size) if (!resource_size(&mr->io))
continue; continue;
/* /*
* For testing purposes let's force small BAR, if not already * For testing purposes let's force small BAR, if not already
* present. * present.
*/ */
saved_io_size = mr->io_size; saved_io = mr->io;
if (mr->io_size == mr->total) { if (resource_size(&mr->io) == mr->total) {
resource_size_t io_size = mr->io_size; resource_size_t io_size = resource_size(&mr->io);
io_size = rounddown_pow_of_two(io_size >> 1); io_size = rounddown_pow_of_two(io_size >> 1);
if (io_size < PAGE_SIZE) if (io_size < PAGE_SIZE)
continue; continue;
mr->io_size = io_size; mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
i915_ttm_buddy_man_force_visible_size(man, i915_ttm_buddy_man_force_visible_size(man,
io_size >> PAGE_SHIFT); io_size >> PAGE_SHIFT);
} }
...@@ -1396,9 +1396,9 @@ static int igt_mmap_migrate(void *arg) ...@@ -1396,9 +1396,9 @@ static int igt_mmap_migrate(void *arg)
IGT_MMAP_MIGRATE_FAIL_GPU | IGT_MMAP_MIGRATE_FAIL_GPU |
IGT_MMAP_MIGRATE_UNFAULTABLE); IGT_MMAP_MIGRATE_UNFAULTABLE);
out_io_size: out_io_size:
mr->io_size = saved_io_size; mr->io = saved_io;
i915_ttm_buddy_man_force_visible_size(man, i915_ttm_buddy_man_force_visible_size(man,
mr->io_size >> PAGE_SHIFT); resource_size(&mr->io) >> PAGE_SHIFT);
if (err) if (err)
return err; return err;
} }
......
...@@ -144,8 +144,8 @@ region_lmem_init(struct intel_memory_region *mem) ...@@ -144,8 +144,8 @@ region_lmem_init(struct intel_memory_region *mem)
int ret; int ret;
if (!io_mapping_init_wc(&mem->iomap, if (!io_mapping_init_wc(&mem->iomap,
mem->io_start, mem->io.start,
mem->io_size)) resource_size(&mem->io)))
return -EIO; return -EIO;
ret = intel_region_ttm_init(mem); ret = intel_region_ttm_init(mem);
...@@ -274,12 +274,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) ...@@ -274,12 +274,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
goto err_region_put; goto err_region_put;
drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region); drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
drm_dbg(&i915->drm, "Local memory IO start: %pa\n", drm_dbg(&i915->drm, "Local memory IO: %pR\n", &mem->io);
&mem->io_start);
drm_info(&i915->drm, "Local memory IO size: %pa\n",
&mem->io_size);
drm_info(&i915->drm, "Local memory available: %pa\n",
&lmem_size);
if (io_size < lmem_size) if (io_size < lmem_size)
drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n", drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
......
...@@ -206,8 +206,8 @@ static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt) ...@@ -206,8 +206,8 @@ static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
* of pages. To succeed with both allocations, especially in case of Small * of pages. To succeed with both allocations, especially in case of Small
* BAR, try to allocate no more than quarter of mappable memory. * BAR, try to allocate no more than quarter of mappable memory.
*/ */
if (mr && size > mr->io_size / 4) if (mr && size > resource_size(&mr->io) / 4)
size = mr->io_size / 4; size = resource_size(&mr->io) / 4;
return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS); return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS);
} }
......
...@@ -1157,7 +1157,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, ...@@ -1157,7 +1157,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
dma_addr_t offset = dma - mem->region.start; dma_addr_t offset = dma - mem->region.start;
void __iomem *s; void __iomem *s;
if (offset + PAGE_SIZE > mem->io_size) { if (offset + PAGE_SIZE > resource_size(&mem->io)) {
ret = -EINVAL; ret = -EINVAL;
break; break;
} }
......
...@@ -502,7 +502,7 @@ static int query_memregion_info(struct drm_i915_private *i915, ...@@ -502,7 +502,7 @@ static int query_memregion_info(struct drm_i915_private *i915,
info.probed_size = mr->total; info.probed_size = mr->total;
if (mr->type == INTEL_MEMORY_LOCAL) if (mr->type == INTEL_MEMORY_LOCAL)
info.probed_cpu_visible_size = mr->io_size; info.probed_cpu_visible_size = resource_size(&mr->io);
else else
info.probed_cpu_visible_size = mr->total; info.probed_cpu_visible_size = mr->total;
......
...@@ -50,7 +50,7 @@ static int __iopagetest(struct intel_memory_region *mem, ...@@ -50,7 +50,7 @@ static int __iopagetest(struct intel_memory_region *mem,
if (memchr_inv(result, value, sizeof(result))) { if (memchr_inv(result, value, sizeof(result))) {
dev_err(mem->i915->drm.dev, dev_err(mem->i915->drm.dev,
"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n", "Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
&mem->region, &mem->io_start, &offset, caller, &mem->region, &mem->io.start, &offset, caller,
value, result[0], result[1], result[2]); value, result[0], result[1], result[2]);
return -EINVAL; return -EINVAL;
} }
...@@ -67,11 +67,11 @@ static int iopagetest(struct intel_memory_region *mem, ...@@ -67,11 +67,11 @@ static int iopagetest(struct intel_memory_region *mem,
int err; int err;
int i; int i;
va = ioremap_wc(mem->io_start + offset, PAGE_SIZE); va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
if (!va) { if (!va) {
dev_err(mem->i915->drm.dev, dev_err(mem->i915->drm.dev,
"Failed to ioremap memory region [%pa + %pa] for %ps\n", "Failed to ioremap memory region [%pa + %pa] for %ps\n",
&mem->io_start, &offset, caller); &mem->io.start, &offset, caller);
return -EFAULT; return -EFAULT;
} }
...@@ -102,10 +102,10 @@ static int iomemtest(struct intel_memory_region *mem, ...@@ -102,10 +102,10 @@ static int iomemtest(struct intel_memory_region *mem,
resource_size_t last, page; resource_size_t last, page;
int err; int err;
if (mem->io_size < PAGE_SIZE) if (resource_size(&mem->io) < PAGE_SIZE)
return 0; return 0;
last = mem->io_size - PAGE_SIZE; last = resource_size(&mem->io) - PAGE_SIZE;
/* /*
* Quick test to check read/write access to the iomap (backing store). * Quick test to check read/write access to the iomap (backing store).
...@@ -207,7 +207,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem, ...@@ -207,7 +207,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915; struct drm_i915_private *i915 = mem->i915;
int err = 0; int err = 0;
if (!mem->io_start) if (!mem->io.start)
return 0; return 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest) if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
...@@ -252,8 +252,7 @@ intel_memory_region_create(struct drm_i915_private *i915, ...@@ -252,8 +252,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->i915 = i915; mem->i915 = i915;
mem->region = DEFINE_RES_MEM(start, size); mem->region = DEFINE_RES_MEM(start, size);
mem->io_start = io_start; mem->io = DEFINE_RES_MEM(io_start, io_size);
mem->io_size = io_size;
mem->min_page_size = min_page_size; mem->min_page_size = min_page_size;
mem->ops = ops; mem->ops = ops;
mem->total = size; mem->total = size;
......
...@@ -71,8 +71,7 @@ struct intel_memory_region { ...@@ -71,8 +71,7 @@ struct intel_memory_region {
struct io_mapping iomap; struct io_mapping iomap;
struct resource region; struct resource region;
resource_size_t io_start; struct resource io;
resource_size_t io_size;
resource_size_t min_page_size; resource_size_t min_page_size;
resource_size_t total; resource_size_t total;
......
...@@ -87,7 +87,7 @@ int intel_region_ttm_init(struct intel_memory_region *mem) ...@@ -87,7 +87,7 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
ret = i915_ttm_buddy_man_init(bdev, mem_type, false, ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
resource_size(&mem->region), resource_size(&mem->region),
mem->io_size, resource_size(&mem->io),
mem->min_page_size, PAGE_SIZE); mem->min_page_size, PAGE_SIZE);
if (ret) if (ret)
return ret; return ret;
...@@ -219,16 +219,16 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem, ...@@ -219,16 +219,16 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
goto out; goto out;
} }
place.lpfn = place.fpfn + (size >> PAGE_SHIFT); place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
} else if (mem->io_size && mem->io_size < mem->total) { } else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) { if (flags & I915_BO_ALLOC_GPU_ONLY) {
place.flags |= TTM_PL_FLAG_TOPDOWN; place.flags |= TTM_PL_FLAG_TOPDOWN;
} else { } else {
place.fpfn = 0; place.fpfn = 0;
if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) { if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
ret = -E2BIG; ret = -E2BIG;
goto out; goto out;
} }
place.lpfn = mem->io_size >> PAGE_SHIFT; place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
} }
} }
......
...@@ -544,8 +544,8 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj) ...@@ -544,8 +544,8 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
u64 start = drm_buddy_block_offset(block); u64 start = drm_buddy_block_offset(block);
u64 end = start + drm_buddy_block_size(mm, block); u64 end = start + drm_buddy_block_size(mm, block);
if (start < mr->io_size) if (start < resource_size(&mr->io))
total += min_t(u64, end, mr->io_size) - start; total += min_t(u64, end, resource_size(&mr->io)) - start;
} }
return total; return total;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment