xref: /linux/drivers/gpu/drm/nouveau/nouveau_dma.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_vmm.h"

#include <nvif/user.h>

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
	uint64_t val;

	val = nvif_rd32(chan->userd, chan->user_get);

	/* reset counter as long as GET is still advancing, this is
	 * to avoid misdetecting a GPU lockup if the GPU happens to
	 * just be processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

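	/* only pause and check the poll budget every 256 reads so the
	 * common case stays cheap; once the counter passes 100000 polls
	 * without GET moving, give up and return -EBUSY.
	 */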
	if ((++*timeout & 0xff) == 0) {
		udelay(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

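	/* GET must point somewhere inside the main push buffer; anything
	 * else (e.g. PFIFO fetching from a buffer called from the main
	 * ring) is reported as -EINVAL so the caller can discard it.
	 */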
	if (val < chan->push.addr ||
	    val > chan->push.addr + (chan->dma.max << 2))
		return -EINVAL;

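	/* convert the address read back from the GPU into a dword offset
	 * into the push buffer */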
	return (val - chan->push.addr) >> 2;
}

int
nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
	uint64_t prev_get = 0;
	int cnt = 0, get;

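	/* chan->dma.free counts how many dwords can be written at dma.cur
	 * without overtaking the last GET value we saw; keep polling until
	 * at least 'size' dwords are available.
	 */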
	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* loop until we have a usable GET pointer.  the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring;
		 * discard these values until something sensible is seen.
		 *
		 * the other case where we discard GET is while the GPU is
		 * fetching from the SKIPS area, so the code below doesn't
		 * have to deal with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT), so we have free space up until
			 * the end of the push buffer.
			 *
			 * we can only hit that path once per call: after
			 * looping back to the beginning of the push buffer
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET==PUT, in which case the below conditional will
			 * always succeed and break us out of the wait loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push buffer;
			 * instruct the GPU to jump back to the start right
			 * after processing the currently pending commands.
			 */
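			/* (the 0x20000000 bit appears to mark the entry as a
			 * jump command rather than a normal method header)
			 */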
			OUT_RING(chan, chan->push.addr | 0x20000000);

			/* wait for GET to depart from the skips area.
			 * prevents writing GET==PUT and triggering a race
			 * that would make us think the GPU is idle when
			 * it's not.
			 */
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get == -EBUSY))
					return -EBUSY;
				if (unlikely(get == -EINVAL))
					continue;
			} while (get <= NOUVEAU_DMA_SKIPS);
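			/* GET has left the skips area, so it's now safe to
			 * move the hardware PUT pointer back to the start of
			 * the push buffer, just past the skip slots.
			 */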
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur  =
			chan->dma.put  = NOUVEAU_DMA_SKIPS;
		}

		/* engine is fetching ahead of us, so we have space up until
		 * the current GET pointer.  the "- 1" is to ensure there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it.  we can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}
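
/* Illustrative sketch (not part of the driver): roughly how a caller is
 * expected to use nouveau_dma_wait() before emitting methods.  It mirrors
 * what a RING_SPACE()-style helper would do; the function name, the dword
 * count and the zero method words are made up for the example, and a real
 * caller would also need to kick the hardware PUT pointer (e.g. FIRE_RING())
 * once the commands are written.
 */
#if 0	/* example only, never compiled */
static int
example_emit_two_dwords(struct nouveau_channel *chan)
{
	int ret;

	/* make sure two dwords can be written without overtaking GET */
	ret = nouveau_dma_wait(chan, 2);
	if (ret)
		return ret;
	chan->dma.free -= 2;	/* account for what we're about to emit */

	/* write the commands into the push buffer at dma.cur */
	OUT_RING(chan, 0x00000000);	/* hypothetical method header */
	OUT_RING(chan, 0x00000000);	/* hypothetical method data */
	return 0;
}
#endif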