Commit 98932149 authored by Chris Wilson

drm/i915: Move object->pages API to i915_gem_object.[ch]

Currently the code for manipulating the pages on an object still
resides in i915_gem.c; move it to i915_gem_object.c.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-3-chris@chris-wilson.co.uk
parent afa13085
@@ -86,7 +86,10 @@ i915-y += $(gt-y)
 
 # GEM (Graphics Execution Management) code
 obj-y += gem/
+gem-y += \
+	gem/i915_gem_object.o
 i915-y += \
+	  $(gem-y) \
 	  i915_active.o \
 	  i915_cmd_parser.o \
 	  i915_gem_batch_pool.o \
@@ -99,7 +102,6 @@ i915-y += \
 	  i915_gem_gtt.o \
 	  i915_gem_internal.o \
 	  i915_gem.o \
-	  i915_gem_object.o \
 	  i915_gem_pm.o \
 	  i915_gem_render_state.o \
 	  i915_gem_shrinker.o \
......
@@ -13,7 +13,7 @@
 
 #include <drm/i915_drm.h>
 
-#include "gem/i915_gem_object_types.h"
+#include "i915_gem_object_types.h"
 
 struct drm_i915_gem_object *i915_gem_object_alloc(void);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
@@ -192,6 +192,136 @@ i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
 int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 			       unsigned int tiling, unsigned int stride);
 
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+		       unsigned int n, unsigned int *offset);
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+			 unsigned int n);
+
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+			       unsigned int n);
+
+dma_addr_t
+i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
+				    unsigned long n,
+				    unsigned int *len);
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+				unsigned long n);
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+				 struct sg_table *pages,
+				 unsigned int sg_page_sizes);
+
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	might_lock(&obj->mm.lock);
+
+	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
+		return 0;
+
+	return __i915_gem_object_get_pages(obj);
+}
+
+static inline bool
+i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+{
+	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+}
+
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+
+	atomic_inc(&obj->mm.pages_pin_count);
+}
+
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
+	return atomic_read(&obj->mm.pages_pin_count);
+}
+
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+	atomic_dec(&obj->mm.pages_pin_count);
+}
+
+static inline void
+i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	__i915_gem_object_unpin_pages(obj);
+}
+
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
+	I915_MM_NORMAL = 0,
+	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
+};
+
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				enum i915_mm_subclass subclass);
+void __i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+
+enum i915_map_type {
+	I915_MAP_WB = 0,
+	I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
+};
+
+/**
+ * i915_gem_object_pin_map - return a contiguous mapping of the entire object
+ * @obj: the object to map into kernel address space
+ * @type: the type of mapping, used to select pgprot_t
+ *
+ * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
+ * pages and then returns a contiguous mapping of the backing storage into
+ * the kernel address space. Based on the @type of mapping, the PTE will be
+ * set to either WriteBack or WriteCombine (via pgprot_t).
+ *
+ * The caller is responsible for calling i915_gem_object_unpin_map() when the
+ * mapping is no longer required.
+ *
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
+ */
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+					   enum i915_map_type type);
+
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+				 unsigned long offset,
+				 unsigned long size);
+static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
+{
+	__i915_gem_object_flush_map(obj, 0, obj->base.size);
+}
+
+/**
+ * i915_gem_object_unpin_map - releases an earlier mapping
+ * @obj: the object to unmap
+ *
+ * After pinning the object and mapping its pages, once you are finished
+ * with your access, call i915_gem_object_unpin_map() to release the pin
+ * upon the mapping. Once the pin count reaches zero, that mapping may be
+ * removed.
+ */
+static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_pages(obj);
+}
+
 static inline struct intel_engine_cs *
 i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 {
......
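For context, here is a minimal usage sketch of the mapping API collected in the header above. The helper name and the write-then-flush flow are illustrative assumptions, not part of this patch; only the i915_gem_object_* calls come from the declarations themselves.

/* Hypothetical helper: copy a buffer into an object through the
 * contiguous kernel mapping. Assumes obj is at least len bytes.
 */
static int example_fill_object(struct drm_i915_gem_object *obj,
			       const void *data, size_t len)
{
	void *vaddr;

	/* Pins the backing pages and returns a WriteBack mapping. */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, data, len);

	/* Flush CPU caches for just the range that was written. */
	__i915_gem_object_flush_map(obj, 0, len);

	/* Release the pin taken by pin_map; once the pin count drops
	 * to zero the mapping may be reclaimed.
	 */
	i915_gem_object_unpin_map(obj);

	return 0;
}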
@@ -2825,141 +2825,6 @@ static inline int __sg_page_count(const struct scatterlist *sg)
 	return sg->length >> PAGE_SHIFT;
 }
 
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
-		       unsigned int n, unsigned int *offset);
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj,
-			 unsigned int n);
-
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-			       unsigned int n);
-
-dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
-				    unsigned long n,
-				    unsigned int *len);
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
-				unsigned long n);
-
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages,
-				 unsigned int sg_page_sizes);
-
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-
-static inline int __must_check
-i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
-	might_lock(&obj->mm.lock);
-
-	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
-		return 0;
-
-	return __i915_gem_object_get_pages(obj);
-}
-
-static inline bool
-i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
-{
-	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
-}
-
-static inline void
-__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
-	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-
-	atomic_inc(&obj->mm.pages_pin_count);
-}
-
-static inline bool
-i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
-{
-	return atomic_read(&obj->mm.pages_pin_count);
-}
-
-static inline void
-__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
-	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
-	atomic_dec(&obj->mm.pages_pin_count);
-}
-
-static inline void
-i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
-	__i915_gem_object_unpin_pages(obj);
-}
-
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
-	I915_MM_NORMAL = 0,
-	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
-};
-
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				enum i915_mm_subclass subclass);
-void __i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-
-enum i915_map_type {
-	I915_MAP_WB = 0,
-	I915_MAP_WC,
-#define I915_MAP_OVERRIDE BIT(31)
-	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
-	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
-};
-
-static inline enum i915_map_type
-i915_coherent_map_type(struct drm_i915_private *i915)
-{
-	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
-}
-
-/**
- * i915_gem_object_pin_map - return a contiguous mapping of the entire object
- * @obj: the object to map into kernel address space
- * @type: the type of mapping, used to select pgprot_t
- *
- * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
- * pages and then returns a contiguous mapping of the backing storage into
- * the kernel address space. Based on the @type of mapping, the PTE will be
- * set to either WriteBack or WriteCombine (via pgprot_t).
- *
- * The caller is responsible for calling i915_gem_object_unpin_map() when the
- * mapping is no longer required.
- *
- * Returns the pointer through which to access the mapped object, or an
- * ERR_PTR() on error.
- */
-void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
-					   enum i915_map_type type);
-
-void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
-				 unsigned long offset,
-				 unsigned long size);
-static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
-{
-	__i915_gem_object_flush_map(obj, 0, obj->base.size);
-}
-
-/**
- * i915_gem_object_unpin_map - releases an earlier mapping
- * @obj: the object to unmap
- *
- * After pinning the object and mapping its pages, once you are finished
- * with your access, call i915_gem_object_unpin_map() to release the pin
- * upon the mapping. Once the pin count reaches zero, that mapping may be
- * removed.
- */
-static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_unpin_pages(obj);
-}
-
 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 				    unsigned int *needs_clflush);
 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
@@ -3358,6 +3223,12 @@ static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
 	return i915_ggtt_offset(i915->gt.scratch);
 }
 
+static inline enum i915_map_type
+i915_coherent_map_type(struct drm_i915_private *i915)
+{
+	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+}
+
 static inline void add_taint_for_CI(unsigned int taint)
 {
 	/*
......
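Note that i915_coherent_map_type() is re-added to i915_drv.h above rather than moving with the rest of the pages API, presumably because it depends on struct drm_i915_private and HAS_LLC() rather than on the object itself. A hedged sketch of a typical caller follows; the function name is hypothetical, not from this patch.

/* Pick WB on LLC platforms, where the cache is shared with the CPU,
 * and WC elsewhere, then map the object with that type.
 */
static void *example_coherent_map(struct drm_i915_private *i915,
				  struct drm_i915_gem_object *obj)
{
	return i915_gem_object_pin_map(obj, i915_coherent_map_type(i915));
}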
@@ -9,7 +9,7 @@
 
 #include "i915_active.h"
 #include "i915_gem_context.h"
-#include "i915_gem_object.h"
+#include "gem/i915_gem_object.h"
 #include "i915_globals.h"
 #include "i915_request.h"
 #include "i915_scheduler.h"
......
@@ -32,7 +32,7 @@
 
 #include "i915_gem_gtt.h"
 #include "i915_gem_fence_reg.h"
-#include "i915_gem_object.h"
+#include "gem/i915_gem_object.h"
 
 #include "i915_active.h"
 #include "i915_request.h"
@@ -24,7 +24,7 @@
 #ifndef __INTEL_FRONTBUFFER_H__
 #define __INTEL_FRONTBUFFER_H__
 
-#include "i915_gem_object.h"
+#include "gem/i915_gem_object.h"
 
 struct drm_i915_private;
 struct drm_i915_gem_object;