Commit be6a0376 authored by Daniel Vetter

drm/i915: Extract i915_gem_shrinker.c

Two code changes:
- Extract i915_gem_shrinker_init.
- Inline i915_gem_object_is_purgeable since we open-code it everywhere
  else too.

This already has the benefit of pulling all the shrinker code
together; the next patch adds a bit of kerneldoc.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 6f4b12f8
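
For reference, the second change simply open-codes the helper's madv check at
each call site; e.g. in i915_gem_madvise_ioctl() below:

        /* before: */
        if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
                i915_gem_object_truncate(obj);

        /* after: */
        if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
                i915_gem_object_truncate(obj);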
@@ -28,6 +28,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_execbuffer.o \
 	  i915_gem_gtt.o \
 	  i915_gem.o \
+	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
 	  i915_gem_userptr.o \
@@ -2600,12 +2600,6 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
-			      long target,
-			      unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2953,6 +2947,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 					       u32 gtt_offset,
 					       u32 size);
 
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+			      long target,
+			      unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
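
The block above is the new file's entire public interface. As a quick
illustration (a sketch only, not code from this patch; nr_pages is a made-up
variable), a caller combines the I915_SHRINK_* flags to pick which lists to
trim, exactly as i915_gem_shrinker_scan() does further down:

        unsigned long freed;

        /* Cheap pass first: only purgeable objects. */
        freed = i915_gem_shrink(dev_priv, nr_pages,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_PURGEABLE);
        if (freed < nr_pages)
                /* Fall back to any unpinned object. */
                freed += i915_gem_shrink(dev_priv, nr_pages - freed,
                                         I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);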
 /*
- * Copyright © 2008 Intel Corporation
+ * Copyright © 2008-2015 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,6 @@
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
-#include <linux/oom.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
@@ -53,15 +52,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable);
 
-static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
-					     struct shrink_control *sc);
-static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
-					    struct shrink_control *sc);
-static int i915_gem_shrinker_oom(struct notifier_block *nb,
-				 unsigned long event,
-				 void *ptr);
-static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
 {
@@ -1936,12 +1926,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
-	return obj->madv == I915_MADV_DONTNEED;
-}
-
 /* Immediately discard the backing storage */
 static void
 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
@@ -2047,85 +2031,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
-		long target, unsigned flags)
-{
-	const struct {
-		struct list_head *list;
-		unsigned int bit;
-	} phases[] = {
-		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
-		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
-		{ NULL, 0 },
-	}, *phase;
-	unsigned long count = 0;
-
-	/*
-	 * As we may completely rewrite the (un)bound list whilst unbinding
-	 * (due to retiring requests) we have to strictly process only
-	 * one element of the list at the time, and recheck the list
-	 * on every iteration.
-	 *
-	 * In particular, we must hold a reference whilst removing the
-	 * object as we may end up waiting for and/or retiring the objects.
-	 * This might release the final reference (held by the active list)
-	 * and result in the object being freed from under us. This is
-	 * similar to the precautions the eviction code must take whilst
-	 * removing objects.
-	 *
-	 * Also note that although these lists do not hold a reference to
-	 * the object we can safely grab one here: The final object
-	 * unreferencing and the bound_list are both protected by the
-	 * dev->struct_mutex and so we won't ever be able to observe an
-	 * object on the bound_list with a reference count equals 0.
-	 */
-	for (phase = phases; phase->list; phase++) {
-		struct list_head still_in_list;
-
-		if ((flags & phase->bit) == 0)
-			continue;
-
-		INIT_LIST_HEAD(&still_in_list);
-		while (count < target && !list_empty(phase->list)) {
-			struct drm_i915_gem_object *obj;
-			struct i915_vma *vma, *v;
-
-			obj = list_first_entry(phase->list,
-					       typeof(*obj), global_list);
-			list_move_tail(&obj->global_list, &still_in_list);
-
-			if (flags & I915_SHRINK_PURGEABLE &&
-			    !i915_gem_object_is_purgeable(obj))
-				continue;
-
-			drm_gem_object_reference(&obj->base);
-
-			/* For the unbound phase, this should be a no-op! */
-			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, vma_link)
-				if (i915_vma_unbind(vma))
-					break;
-
-			if (i915_gem_object_put_pages(obj) == 0)
-				count += obj->base.size >> PAGE_SHIFT;
-
-			drm_gem_object_unreference(&obj->base);
-		}
-		list_splice(&still_in_list, phase->list);
-	}
-
-	return count;
-}
-
-static unsigned long
-i915_gem_shrink_all(struct drm_i915_private *dev_priv)
-{
-	i915_gem_evict_everything(dev_priv->dev);
-	return i915_gem_shrink(dev_priv, LONG_MAX,
-			       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
-}
-
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
@@ -4425,7 +4330,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	obj->madv = args->madv;
 
 	/* if the object is no longer attached, discard its backing storage */
-	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
+	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->madv != __I915_MADV_PURGED;
@@ -5064,13 +4969,7 @@ i915_gem_load(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
-	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
-	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
-	register_shrinker(&dev_priv->mm.shrinker);
-
-	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-	register_oom_notifier(&dev_priv->mm.oom_notifier);
+	i915_gem_shrinker_init(dev_priv);
 
 	i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
@@ -5162,77 +5061,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 	}
 }
 
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
-{
-	if (!mutex_is_locked(mutex))
-		return false;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
-	return mutex->owner == task;
-#else
-	/* Since UP may be pre-empted, we cannot assume that we own the lock */
-	return false;
-#endif
-}
-
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
-	if (!mutex_trylock(&dev->struct_mutex)) {
-		if (!mutex_is_locked_by(&dev->struct_mutex, current))
-			return false;
-
-		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
-			return false;
-
-		*unlock = false;
-	} else
-		*unlock = true;
-
-	return true;
-}
-
-static int num_vma_bound(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-	int count = 0;
-
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (drm_mm_node_allocated(&vma->node))
-			count++;
-
-	return count;
-}
-
-static unsigned long
-i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(shrinker, struct drm_i915_private, mm.shrinker);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_i915_gem_object *obj;
-	unsigned long count;
-	bool unlock;
-
-	if (!i915_gem_shrinker_lock(dev, &unlock))
-		return 0;
-
-	count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
-		if (obj->pages_pin_count == 0)
-			count += obj->base.size >> PAGE_SHIFT;
-
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!i915_gem_obj_is_pinned(obj) &&
-		    obj->pages_pin_count == num_vma_bound(obj))
-			count += obj->base.size >> PAGE_SHIFT;
-	}
-
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
-
-	return count;
-}
-
 /* All the new VM stuff */
 unsigned long
 i915_gem_obj_offset(struct drm_i915_gem_object *o,
@@ -5333,102 +5161,6 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 	return 0;
 }
 
-static unsigned long
-i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(shrinker, struct drm_i915_private, mm.shrinker);
-	struct drm_device *dev = dev_priv->dev;
-	unsigned long freed;
-	bool unlock;
-
-	if (!i915_gem_shrinker_lock(dev, &unlock))
-		return SHRINK_STOP;
-
-	freed = i915_gem_shrink(dev_priv,
-				sc->nr_to_scan,
-				I915_SHRINK_BOUND |
-				I915_SHRINK_UNBOUND |
-				I915_SHRINK_PURGEABLE);
-	if (freed < sc->nr_to_scan)
-		freed += i915_gem_shrink(dev_priv,
-					 sc->nr_to_scan - freed,
-					 I915_SHRINK_BOUND |
-					 I915_SHRINK_UNBOUND);
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
-
-	return freed;
-}
-
-static int
-i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(nb, struct drm_i915_private, mm.oom_notifier);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_i915_gem_object *obj;
-	unsigned long timeout = msecs_to_jiffies(5000) + 1;
-	unsigned long pinned, bound, unbound, freed_pages;
-	bool was_interruptible;
-	bool unlock;
-
-	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
-		schedule_timeout_killable(1);
-		if (fatal_signal_pending(current))
-			return NOTIFY_DONE;
-	}
-	if (timeout == 0) {
-		pr_err("Unable to purge GPU memory due lock contention.\n");
-		return NOTIFY_DONE;
-	}
-
-	was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
-
-	freed_pages = i915_gem_shrink_all(dev_priv);
-
-	dev_priv->mm.interruptible = was_interruptible;
-
-	/* Because we may be allocating inside our own driver, we cannot
-	 * assert that there are no objects with pinned pages that are not
-	 * being pointed to by hardware.
-	 */
-	unbound = bound = pinned = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-		if (!obj->base.filp) /* not backed by a freeable object */
-			continue;
-
-		if (obj->pages_pin_count)
-			pinned += obj->base.size;
-		else
-			unbound += obj->base.size;
-	}
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->base.filp)
-			continue;
-
-		if (obj->pages_pin_count)
-			pinned += obj->base.size;
-		else
-			bound += obj->base.size;
-	}
-
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
-
-	if (freed_pages || unbound || bound)
-		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-			freed_pages << PAGE_SHIFT, pinned);
-	if (unbound || bound)
-		pr_err("%lu and %lu bytes still available in the "
-		       "bound and unbound GPU page lists.\n",
-		       bound, unbound);
-
-	*(unsigned long *)ptr += freed_pages;
-	return NOTIFY_DONE;
-}
-
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
        if (!mutex_is_locked(mutex))
                return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
        return mutex->owner == task;
#else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
        return false;
#endif
}

unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
                long target, unsigned flags)
{
        const struct {
                struct list_head *list;
                unsigned int bit;
        } phases[] = {
                { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
                { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
                { NULL, 0 },
        }, *phase;
        unsigned long count = 0;

        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
         * (due to retiring requests) we have to strictly process only
         * one element of the list at the time, and recheck the list
         * on every iteration.
         *
         * In particular, we must hold a reference whilst removing the
         * object as we may end up waiting for and/or retiring the objects.
         * This might release the final reference (held by the active list)
         * and result in the object being freed from under us. This is
         * similar to the precautions the eviction code must take whilst
         * removing objects.
         *
         * Also note that although these lists do not hold a reference to
         * the object we can safely grab one here: The final object
         * unreferencing and the bound_list are both protected by the
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count equals 0.
         */
        for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;

                if ((flags & phase->bit) == 0)
                        continue;

                INIT_LIST_HEAD(&still_in_list);
                while (count < target && !list_empty(phase->list)) {
                        struct drm_i915_gem_object *obj;
                        struct i915_vma *vma, *v;

                        obj = list_first_entry(phase->list,
                                               typeof(*obj), global_list);
                        list_move_tail(&obj->global_list, &still_in_list);

                        if (flags & I915_SHRINK_PURGEABLE &&
                            obj->madv != I915_MADV_DONTNEED)
                                continue;

                        drm_gem_object_reference(&obj->base);

                        /* For the unbound phase, this should be a no-op! */
                        list_for_each_entry_safe(vma, v,
                                                 &obj->vma_list, vma_link)
                                if (i915_vma_unbind(vma))
                                        break;

                        if (i915_gem_object_put_pages(obj) == 0)
                                count += obj->base.size >> PAGE_SHIFT;

                        drm_gem_object_unreference(&obj->base);
                }
                list_splice(&still_in_list, phase->list);
        }

        return count;
}
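
/*
 * Note: target and the return value are both measured in pages, not
 * objects (see the obj->base.size >> PAGE_SHIFT accounting above); the
 * shrinker callbacks below use the same page-based unit.
 */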

unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
        i915_gem_evict_everything(dev_priv->dev);
        return i915_gem_shrink(dev_priv, LONG_MAX,
                               I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
}

static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return false;

                if (to_i915(dev)->mm.shrinker_no_lock_stealing)
                        return false;

                *unlock = false;
        } else
                *unlock = true;

        return true;
}
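
/*
 * Why the trylock dance above: direct reclaim can call into this shrinker
 * while i915 itself is allocating memory with dev->struct_mutex already
 * held. mutex_is_locked_by() lets the shrinker proceed when the lock
 * owner is the current task (unless shrinker_no_lock_stealing is set);
 * on UP kernels without owner tracking it conservatively reports false.
 */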

static int num_vma_bound(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        int count = 0;

        list_for_each_entry(vma, &obj->vma_list, vma_link)
                if (drm_mm_node_allocated(&vma->node))
                        count++;

        return count;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        unsigned long count;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return 0;

        count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        count += obj->base.size >> PAGE_SHIFT;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!i915_gem_obj_is_pinned(obj) &&
                    obj->pages_pin_count == num_vma_bound(obj))
                        count += obj->base.size >> PAGE_SHIFT;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return count;
}
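
/*
 * Counting rule: an unbound object is freeable if nothing holds its pages
 * pinned; a bound object is freeable if it is not pinned and all of its
 * page pins are accounted for by VMA bindings (num_vma_bound).
 */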

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = dev_priv->dev;
        unsigned long freed;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;

        freed = i915_gem_shrink(dev_priv,
                                sc->nr_to_scan,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_PURGEABLE);
        if (freed < sc->nr_to_scan)
                freed += i915_gem_shrink(dev_priv,
                                         sc->nr_to_scan - freed,
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return freed;
}
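
/*
 * Scan policy: try to satisfy nr_to_scan from already-purgeable objects
 * first, and only then fall back to unbinding and freeing any other
 * unpinned object, bound or unbound.
 */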

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        unsigned long timeout = msecs_to_jiffies(5000) + 1;
        unsigned long pinned, bound, unbound, freed_pages;
        bool was_interruptible;
        bool unlock;

        while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
                schedule_timeout_killable(1);
                if (fatal_signal_pending(current))
                        return NOTIFY_DONE;
        }
        if (timeout == 0) {
                pr_err("Unable to purge GPU memory due lock contention.\n");
                return NOTIFY_DONE;
        }

        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;

        freed_pages = i915_gem_shrink_all(dev_priv);

        dev_priv->mm.interruptible = was_interruptible;

        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
         * being pointed to by hardware.
         */
        unbound = bound = pinned = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
                if (!obj->base.filp) /* not backed by a freeable object */
                        continue;

                if (obj->pages_pin_count)
                        pinned += obj->base.size;
                else
                        unbound += obj->base.size;
        }
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!obj->base.filp)
                        continue;

                if (obj->pages_pin_count)
                        pinned += obj->base.size;
                else
                        bound += obj->base.size;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        if (freed_pages || unbound || bound)
                pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
                        freed_pages << PAGE_SHIFT, pinned);
        if (unbound || bound)
                pr_err("%lu and %lu bytes still available in the "
                       "bound and unbound GPU page lists.\n",
                       bound, unbound);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}
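
/*
 * The OOM notifier reports progress by adding the number of freed pages
 * to the unsigned long the notifier chain passes in through ptr.
 */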

void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
        dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
        dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&dev_priv->mm.shrinker);

        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        register_oom_notifier(&dev_priv->mm.oom_notifier);
}
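
For context, this is the core-MM contract the new file implements:
->count_objects() returns a cheap estimate of how much could be freed (0 means
"skip me"), and ->scan_objects() frees up to sc->nr_to_scan units, returning
the number actually freed or SHRINK_STOP to abort the pass (as
i915_gem_shrinker_scan() does when it cannot take the lock). A minimal
standalone sketch, with all "example_" names being hypothetical:

        #include <linux/atomic.h>
        #include <linux/kernel.h>
        #include <linux/shrinker.h>

        static atomic_long_t example_freeable = ATOMIC_LONG_INIT(0);

        static unsigned long
        example_count(struct shrinker *shrinker, struct shrink_control *sc)
        {
                /* cheap estimate only; must not block */
                return atomic_long_read(&example_freeable);
        }

        static unsigned long
        example_scan(struct shrinker *shrinker, struct shrink_control *sc)
        {
                /* free up to sc->nr_to_scan units, report what was freed */
                unsigned long nr = min_t(unsigned long, sc->nr_to_scan,
                                         atomic_long_read(&example_freeable));

                atomic_long_sub(nr, &example_freeable);
                return nr;
        }

        static struct shrinker example_shrinker = {
                .count_objects = example_count,
                .scan_objects = example_scan,
                .seeks = DEFAULT_SEEKS,
        };

        /* at init time: register_shrinker(&example_shrinker); */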