// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. A reservation object
 * can have any number of fences attached to it. Each fence carries a usage
 * parameter determining how the operation represented by the fence is using
 * the resource. The RCU mechanism is used to protect read access to fences
 * from locked write-side updates.
 *
 * See struct dma_resv for more details.
 */

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
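
/*
 * Locking example: write-side updates must take &dma_resv.lock through
 * reservation_ww_class. A minimal sketch, not a definitive recipe; "resv"
 * is a hypothetical driver-owned object and error handling is omitted:
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	if (dma_resv_lock(resv, &ctx) == -EDEADLK)
 *		dma_resv_lock_slow(resv, &ctx);
 *
 *	... add or replace fences here ...
 *
 *	dma_resv_unlock(resv);
 *	ww_acquire_fini(&ctx);
 */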

/* Mask for the lower fence pointer bits */
#define DMA_RESV_LIST_MASK	0x3

struct dma_resv_list {
	struct rcu_head rcu;
	u32 num_fences, max_fences;
	struct dma_fence __rcu *table[];
};

/* Extract the fence and usage flags from an RCU protected entry in the list. */
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
				struct dma_resv *resv, struct dma_fence **fence,
				enum dma_resv_usage *usage)
{
	long tmp;

	tmp = (long)rcu_dereference_check(list->table[index],
					  resv ? dma_resv_held(resv) : true);
	*fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
	if (usage)
		*usage = tmp & DMA_RESV_LIST_MASK;
}

/* Set the fence and usage flags at the specific index in the list. */
static void dma_resv_list_set(struct dma_resv_list *list,
			      unsigned int index,
			      struct dma_fence *fence,
			      enum dma_resv_usage usage)
{
	long tmp = ((long)fence) | usage;

	RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}

/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * max_fences.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
	struct dma_resv_list *list;
	size_t size;

	/* Round up to the next kmalloc bucket size. */
	size = kmalloc_size_roundup(struct_size(list, table, max_fences));

	list = kmalloc(size, GFP_KERNEL);
	if (!list)
		return NULL;

	/* Given the resulting bucket size, recalculate max_fences. */
	list->max_fences = (size - offsetof(typeof(*list), table)) /
		sizeof(*list->table);

	return list;
}

/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->num_fences; ++i) {
		struct dma_fence *fence;

		dma_resv_list_entry(list, i, NULL, &fence, NULL);
		dma_fence_put(fence);
	}
	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);

	RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/* Dereference the fences while ensuring RCU rules */
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}

/**
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_fence().  Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_fences_list(obj);
	if (old && old->max_fences) {
		if ((old->num_fences + num_fences) <= old->max_fences)
			return 0;
		max = max(old->num_fences + num_fences, old->max_fences * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
		enum dma_resv_usage usage;
		struct dma_fence *fence;

		dma_resv_list_entry(old, i, obj, &fence, &usage);
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->table[--k], fence);
		else
			dma_resv_list_set(new, j++, fence, usage);
	}
	new->num_fences = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fences, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->table[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_fences);

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.max_fences.
 */
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_fences_list(obj);

	dma_resv_assert_held(obj);

	/* Test fence slot reservation */
	if (fences)
		fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif

/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() must have been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	/* Drivers should not add containers here, instead add each fence
	 * individually.
	 */
	WARN_ON(dma_fence_is_container(fence));

	fobj = dma_resv_fences_list(obj);
	count = fobj->num_fences;

	for (i = 0; i < count; ++i) {
		enum dma_resv_usage old_usage;

		dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
		if ((old->context == fence->context && old_usage >= usage &&
		     dma_fence_is_later(fence, old)) ||
		    dma_fence_is_signaled(old)) {
			dma_resv_list_set(fobj, i, fence, usage);
			dma_fence_put(old);
			return;
		}
	}

	BUG_ON(fobj->num_fences >= fobj->max_fences);
	count++;

	dma_resv_list_set(fobj, i, fence, usage);
	/* pointer update must be visible before we extend the num_fences */
	smp_store_mb(fobj->num_fences, count);
}
EXPORT_SYMBOL(dma_resv_add_fence);
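
/*
 * Usage sketch for the reserve/add pattern above: a slot must be reserved
 * under the lock before a fence can be added. Purely illustrative; "resv"
 * and "fence" are hypothetical driver objects and error handling is reduced
 * to the bare minimum:
 *
 *	dma_resv_lock(resv, NULL);
 *	ret = dma_resv_reserve_fences(resv, 1);
 *	if (!ret)
 *		dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(resv);
 */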

/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence no longer has access to the
 * resources represented by the dma_resv object when the new fence completes.
 *
 * An example of using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *replacement,
			     enum dma_resv_usage usage)
{
	struct dma_resv_list *list;
	unsigned int i;

	dma_resv_assert_held(obj);

	list = dma_resv_fences_list(obj);
	for (i = 0; list && i < list->num_fences; ++i) {
		struct dma_fence *old;

		dma_resv_list_entry(list, i, obj, &old, NULL);
		if (old->context != context)
			continue;

		dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
		dma_fence_put(old);
	}
}
EXPORT_SYMBOL(dma_resv_replace_fences);
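
/*
 * Illustrative fragment for dma_resv_replace_fences(), mirroring the page
 * table update example from the comment above. "resv", "preempt_ctx" and
 * "pt_fence" are hypothetical driver objects:
 *
 *	dma_resv_lock(resv, NULL);
 *	dma_resv_replace_fences(resv, preempt_ctx, pt_fence,
 *				DMA_RESV_USAGE_BOOKKEEP);
 *	dma_resv_unlock(resv);
 */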

/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->index = 0;
	cursor->num_fences = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);
	if (cursor->fences)
		cursor->num_fences = cursor->fences->num_fences;
	cursor->is_restarted = true;
}

/* Walk to the next unsignaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	if (!cursor->fences)
		return;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index >= cursor->num_fences) {
			cursor->fence = NULL;
			break;
		}

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &cursor->fence,
				    &cursor->fence_usage);
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence) {
			dma_resv_iter_restart_unlocked(cursor);
			continue;
		}

		if (!dma_fence_is_signaled(cursor->fence) &&
		    cursor->usage >= cursor->fence_usage)
			break;
	} while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
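
/*
 * Unlocked iteration sketch: dma_resv_for_each_fence_unlocked() wraps the
 * two functions above and must be bracketed by dma_resv_iter_begin() and
 * dma_resv_iter_end(). Any accumulated state has to be thrown away on a
 * restart. Purely illustrative; "resv" is a hypothetical object:
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *	unsigned int count = 0;
 *
 *	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
 *	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *		if (dma_resv_iter_is_restarted(&cursor))
 *			count = 0;
 *		++count;
 *	}
 *	dma_resv_iter_end(&cursor);
 */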

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);

	fence = dma_resv_iter_next(cursor);
	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;

	do {
		if (!cursor->fences ||
		    cursor->index >= cursor->fences->num_fences)
			return NULL;

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &fence, &cursor->fence_usage);
	} while (cursor->fence_usage > cursor->usage);

	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
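
/*
 * Locked iteration sketch: with &dma_resv.lock held the fence list is
 * stable, so dma_resv_for_each_fence() never restarts. Illustrative only;
 * "resv" is a hypothetical, already locked object:
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *
 *	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_WRITE, fence)
 *		dma_fence_enable_sw_signaling(fence);
 */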

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f;

	dma_resv_assert_held(dst);

	list = NULL;

	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, f) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);

			list = dma_resv_list_alloc(cursor.num_fences);
			if (!list) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
			list->num_fences = 0;
		}

		dma_fence_get(f);
		dma_resv_list_set(list, list->num_fences++, f,
				  dma_resv_iter_usage(&cursor));
	}
	dma_resv_iter_end(&cursor);

	list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
	dma_resv_list_free(list);
	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
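
/*
 * Illustrative fragment for dma_resv_copy_fences(); "dst" and "src" are
 * hypothetical objects, and only dst needs to be locked:
 *
 *	dma_resv_lock(dst, NULL);
 *	ret = dma_resv_copy_fences(dst, src);
 *	dma_resv_unlock(dst);
 */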

/**
 * dma_resv_get_fences - Get an object's fences without the update side lock
 * held
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*num_fences = 0;
	*fences = NULL;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			struct dma_fence **new_fences;
			unsigned int count;

			while (*num_fences)
				dma_fence_put((*fences)[--(*num_fences)]);

			count = cursor.num_fences + 1;

			/* Re-allocate the array if needed. */
			new_fences = krealloc_array(*fences, count,
						    sizeof(void *),
						    GFP_KERNEL);
			if (count && !new_fences) {
				kfree(*fences);
				*fences = NULL;
				*num_fences = 0;
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
			*fences = new_fences;
		}

		(*fences)[(*num_fences)++] = dma_fence_get(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);

/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 *
 * Warning: This can't be used like this when adding the fence back to the resv
 * object since that can lead to stack corruption when finalizing the
 * dma_fence_array.
 *
 * Returns 0 on success and negative error values on failure.
 */
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;
	unsigned count;
	int r;

	r = dma_resv_get_fences(obj, usage, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		*fence = NULL;
		return 0;
	}

	if (count == 1) {
		*fence = fences[0];
		kfree(fences);
		return 0;
	}

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       1, false);
	if (!array) {
		while (count--)
			dma_fence_put(fences[count]);
		kfree(fences);
		return -ENOMEM;
	}

	*fence = &array->base;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
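
/*
 * Illustrative fragment for dma_resv_get_singleton(); the resulting fence,
 * if any, is owned by the caller. "resv" is a hypothetical object:
 *
 *	struct dma_fence *fence;
 *
 *	if (!dma_resv_get_singleton(resv, DMA_RESV_USAGE_WRITE, &fence) &&
 *	    fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */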

/**
 * dma_resv_wait_timeout - Wait on a reservation object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
			   bool intr, unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
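
/*
 * Illustrative fragment for dma_resv_wait_timeout(): wait up to one second
 * for all reads and writes to finish. "resv" is a hypothetical object:
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, true, HZ);
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */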
/**
 * dma_resv_set_deadline - Set a deadline on reservation's objects fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @deadline: the requested deadline (MONOTONIC)
 *
 * May be called without holding the dma_resv lock.  Sets @deadline on
 * all fences filtered by @usage.
 */
void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
			   ktime_t deadline)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_fence_set_deadline(fence, deadline);
	}
	dma_resv_iter_end(&cursor);
}
EXPORT_SYMBOL_GPL(dma_resv_set_deadline);

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * True if all fences are signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
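
/*
 * Illustrative fragment for dma_resv_test_signaled(): poll whether a
 * hypothetical "resv" object is completely idle across all usage classes:
 *
 *	bool idle = dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP);
 */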

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
		seq_printf(seq, "\t%s fence:",
			   usage[dma_resv_iter_usage(&cursor)]);
		dma_fence_describe(fence, seq);
	}
}
EXPORT_SYMBOL_GPL(dma_resv_describe);
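
/*
 * Illustrative fragment for dma_resv_describe(), e.g. from a debugfs show
 * callback. "resv" and "seq" are hypothetical; the lock must be held since
 * the locked iterator is used internally:
 *
 *	dma_resv_lock(resv, NULL);
 *	dma_resv_describe(resv, seq);
 *	dma_resv_unlock(resv);
 */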

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif