/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

/* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
 * tend to go wrong 1000s of times in a short timespan.
 */
#define GEM_WARN_ON(x)  WARN_RATELIMIT(x, "%s", __stringify(x))

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */

struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
30
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
31
	struct msm_mmu *mmu;
32
	struct kref kref;
33 34 35 36 37

	/* For address spaces associated with a specific process, this
	 * will be non-NULL:
	 */
	struct pid *pid;
38 39 40

	/* @faults: the number of GPU hangs associated with this address space */
	int faults;
41 42
};

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace);

void msm_gem_address_space_put(struct msm_gem_address_space *aspace);

struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size);

struct msm_gem_vma {
	struct drm_mm_node node;
	uint64_t iova;
55 56
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
57
	bool mapped;
58
	int inuse;
59 60
};

Rob Clark's avatar
Rob Clark committed
61 62 63 64 65 66 67 68 69 70 71 72 73
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages,
		u64 range_start, u64 range_end);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages);
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);

74 75 76 77 78
struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

Rob Clark's avatar
Rob Clark committed
79 80 81 82 83
	/**
	 * Advice: are the backing pages purgeable?
	 */
	uint8_t madv;

84 85 86 87 88
	/**
	 * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
	 */
	bool dontneed : 1;

89 90 91 92 93
	/**
	 * Is object evictable (ie. counted in priv->evictable_count)?
	 */
	bool evictable : 1;

Rob Clark's avatar
Rob Clark committed
94 95 96 97 98
	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

Rob Clark's avatar
Rob Clark committed
99 100 101 102 103 104
	/**
	 * Node in list of all objects (mainly for debugfs, protected by
	 * priv->obj_lock
	 */
	struct list_head node;

105 106
	/**
	 * An object is either:
Rob Clark's avatar
Rob Clark committed
107
	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
108
	 *     (depending on purgeability status)
Rob Clark's avatar
Rob Clark committed
109 110 111 112 113
	 *  active   - on one one of the gpu's active_list..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     2d and 3d one devices which have both, meaning we need to
	 *     block on submit if a bo is already on other ring
	 */
114
	struct list_head mm_list;
Rob Clark's avatar
Rob Clark committed
115

116 117 118 119
	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

120
	struct list_head vmas;    /* list of msm_gem_vma */
Rob Clark's avatar
Rob Clark committed
121

122
	/* For physically contiguous buffers.  Used when we don't have
123
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
124 125
	 */
	struct drm_mm_node *vram_node;
126 127

	char name[32]; /* Identifier to print for the debugfs files */
128

129
	int active_count;
130
	int pin_count;
131 132 133
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
void msm_gem_active_put(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);

#ifdef CONFIG_DEBUG_FS
/* Aggregated object accounting used by the debugfs GEM dumps. */
struct msm_gem_stats {
	struct {
		unsigned count;
		size_t size;
	} all, active, resident, purgeable, purged;
};

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif

194 195 196
static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
197
	dma_resv_lock(obj->resv, NULL);
198 199
}

200 201 202
static inline bool __must_check
msm_gem_trylock(struct drm_gem_object *obj)
{
203
	return dma_resv_trylock(obj->resv);
204 205
}

206 207 208
static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
209
	return dma_resv_lock_interruptible(obj->resv, NULL);
210 211 212 213 214
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
215
	dma_resv_unlock(obj->resv);
216 217 218 219 220
}

static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
221
	return dma_resv_is_locked(obj->resv);
222 223
}

Rob Clark's avatar
Rob Clark committed
224 225
static inline bool is_active(struct msm_gem_object *msm_obj)
{
226
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
227
	return msm_obj->active_count;
Rob Clark's avatar
Rob Clark committed
228 229
}

230 231
/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
232
{
233
	return msm_obj->base.import_attach || msm_obj->pin_count;
234 235
}

Rob Clark's avatar
Rob Clark committed
236 237 238
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
239
			!is_unpurgeable(msm_obj);
Rob Clark's avatar
Rob Clark committed
240 241
}

Rob Clark's avatar
Rob Clark committed
242 243
static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
244
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
Rob Clark's avatar
Rob Clark committed
245 246 247
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}

248
static inline void mark_purgeable(struct msm_gem_object *msm_obj)
249 250 251
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

252
	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
253

254
	if (is_unpurgeable(msm_obj))
255 256
		return;

257
	if (GEM_WARN_ON(msm_obj->dontneed))
258 259 260 261 262 263
		return;

	priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->dontneed = true;
}

264
static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
265 266 267
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

268
	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
269

270
	if (is_unpurgeable(msm_obj))
271 272
		return;

273
	if (GEM_WARN_ON(!msm_obj->dontneed))
274 275 276
		return;

	priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
277
	GEM_WARN_ON(priv->shrinkable_count < 0);
278 279 280
	msm_obj->dontneed = false;
}

281 282
static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
283
	return is_unpurgeable(msm_obj) || msm_obj->vaddr;
284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318
}

/* Account the object in priv->evictable_count (caller holds mm_lock).
 * Use the file's ratelimited GEM_WARN_ON, matching mark_purgeable().
 */
static inline void mark_evictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (GEM_WARN_ON(msm_obj->evictable))
		return;

	priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->evictable = true;
}

/* Undo mark_evictable() accounting (caller holds mm_lock).
 * Use the file's ratelimited GEM_WARN_ON, matching mark_unpurgeable().
 */
static inline void mark_unevictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (GEM_WARN_ON(!msm_obj->evictable))
		return;

	priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
	GEM_WARN_ON(priv->evictable_count < 0);
	msm_obj->evictable = false;
}

void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);

Rob Clark's avatar
Rob Clark committed
323 324
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
Rob Clark's avatar
Rob Clark committed
325
 * make it easier to unwind when things go wrong, etc).
Rob Clark's avatar
Rob Clark committed
326 327
 */
struct msm_gem_submit {
328
	struct drm_sched_job base;
Rob Clark's avatar
Rob Clark committed
329
	struct kref ref;
Rob Clark's avatar
Rob Clark committed
330 331
	struct drm_device *dev;
	struct msm_gpu *gpu;
332
	struct msm_gem_address_space *aspace;
333
	struct list_head node;   /* node in ring submit list */
Rob Clark's avatar
Rob Clark committed
334
	struct ww_acquire_ctx ticket;
335
	uint32_t seqno;		/* Sequence number of the submit on the ring */
336 337 338 339 340 341 342 343 344 345 346

	/* Hw fence, which is created when the scheduler executes the job, and
	 * is signaled when the hw finishes (via seqno write from cmdstream)
	 */
	struct dma_fence *hw_fence;

	/* Userspace visible fence, which is signaled by the scheduler after
	 * the hw_fence is signaled.
	 */
	struct dma_fence *user_fence;

347
	int fence_id;       /* key into queue->fence_idr */
348
	struct msm_gpu_submitqueue *queue;
349
	struct pid *pid;    /* submitting process */
350
	bool fault_dumped;  /* Limit devcoredump dumping to one per submit */
351
	bool valid;         /* true if no cmdstream patching needed */
352
	bool in_rb;         /* "sudo" mode, copy cmds into RB */
353
	struct msm_ringbuffer *ring;
Rob Clark's avatar
Rob Clark committed
354 355
	unsigned int nr_cmds;
	unsigned int nr_bos;
356
	u32 ident;	   /* A "identifier" for the submit for logging */
Rob Clark's avatar
Rob Clark committed
357 358 359
	struct {
		uint32_t type;
		uint32_t size;  /* in dwords */
Rob Clark's avatar
Rob Clark committed
360
		uint64_t iova;
361
		uint32_t offset;/* in dwords */
Rob Clark's avatar
Rob Clark committed
362
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
363 364
		uint32_t nr_relocs;
		struct drm_msm_gem_submit_reloc *relocs;
365
	} *cmd;  /* array of size nr_cmds */
Rob Clark's avatar
Rob Clark committed
366 367
	struct {
		uint32_t flags;
368 369 370 371
		union {
			struct msm_gem_object *obj;
			uint32_t handle;
		};
Rob Clark's avatar
Rob Clark committed
372
		uint64_t iova;
373
	} bos[];
Rob Clark's avatar
Rob Clark committed
374 375
};

376 377 378 379 380
static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
{
	return container_of(job, struct msm_gem_submit, base);
}

void __msm_gem_submit_destroy(struct kref *kref);

/* Take a reference on the submit. */
static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

/* Drop a reference; the last put frees via __msm_gem_submit_destroy(). */
static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}

void msm_submit_retire(struct msm_gem_submit *submit);

395 396 397 398 399 400 401 402 403 404
/* helper to determine of a buffer in submit should be dumped, used for both
 * devcoredump and debugfs cmdstream dumping:
 */
static inline bool
should_dump(struct msm_gem_submit *submit, int idx)
{
	extern bool rd_full;
	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
}

#endif /* __MSM_GEM_H__ */