/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>

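/*
 * Walk @list backwards, starting with the entry before @entry, and
 * unlock every buffer reserved so far. Used to unwind a partially
 * completed reservation pass.
 */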
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					      struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

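/**
 * ttm_eu_backoff_reservation - Unreserve a list of buffers.
 * @ticket: ww_acquire_ctx used to reserve the buffers, or NULL.
 * @list: thread-private list of struct ttm_validate_buffer.
 *
 * Moves every buffer on @list to the LRU tail, unlocks it, and releases
 * @ticket if one was used for the reservation.
 */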
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/**
 * ttm_eu_reserve_buffers - Reserve a list of buffers for validation.
 * @ticket: ww_acquire_ctx tracking this reservation sequence, or NULL.
 * @list: thread-private list of struct ttm_validate_buffer.
 * @intr: sleep interruptibly when waiting for another reservation.
 * @dups: list that receives duplicate entries, or NULL to treat a
 * duplicated buffer as an error.
 *
 * If a buffer is already reserved for another validation, the validator
 * with the younger acquire sequence backs off and waits for the buffer
 * to become unreserved. This prevents deadlocks when multiple validators
 * reserve the same buffers in different orders.
 *
 * Return: 0 on success, a negative error code on failure.
 */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned int num_fences;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		/* -EALREADY means the buffer is already reserved under
		 * this ticket, i.e. it appears twice on the list. If the
		 * caller supplied @dups, move the duplicate there and
		 * rewind the iterator so that the loop continues with the
		 * entry that followed it.
		 */
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

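		/* Reserve fence slots up front: at least one slot is
		 * needed even when no shared slots were requested, so
		 * that a fence can later be added to the reservation
		 * object.
		 */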
		num_fences = max(entry->num_shared, 1u);
		if (!ret) {
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);
			if (!ret)
				continue;
		}

		/* We lost out: drop every reservation taken so far, try
		 * to reserve only this buffer, and if that succeeds
		 * resume the pass over the remaining buffers.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK)
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);

		if (!ret)
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* Move this entry to the front of the list so that the
		 * loop continues with the buffers that are not yet
		 * reserved, without tracking how far we got.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

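/**
 * ttm_eu_fence_buffer_objects - Fence and unreserve a list of buffers.
 * @ticket: ww_acquire_ctx used to reserve the buffers, or NULL.
 * @list: thread-private list of struct ttm_validate_buffer.
 * @fence: fence to add to each buffer's reservation object.
 *
 * Should be called once command submission is complete. Adds @fence to
 * each buffer's dma_resv, as a read fence when the entry asked for
 * shared slots and as a write fence otherwise, then moves the buffer to
 * the LRU tail and unlocks it. Releases @ticket if one was used.
 */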
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
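
/*
 * Typical pairing of these helpers in a driver's command-submission
 * path (a minimal sketch: the list setup, the validation/submission
 * step and the fence are driver-specific, and
 * my_driver_validate_and_submit() is a hypothetical placeholder):
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(list);
 *	int ret;
 *
 *	... add struct ttm_validate_buffer entries to &list ...
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_driver_validate_and_submit(&list);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 */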