1 /*
2  * Copyright 2011 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 
25 #include <linux/firmware.h>
26 #include "drmP.h"
27 #include "nouveau_drv.h"
28 #include "nouveau_util.h"
29 #include "nouveau_vm.h"
30 #include "nouveau_ramht.h"
31 #include "nva3_copy.fuc.h"
32 
/* Driver state for the NVA3+ PCOPY engine; only wraps the common
 * exec-engine base, no additional per-engine state is needed. */
struct nva3_copy_engine {
	struct nouveau_exec_engine base;
};
36 
/* Create a PCOPY context for a channel: allocates a 256-byte context
 * object and fills the copy-engine slot (0xc0..0xd4) of the channel's
 * instance block with its location.
 *
 * Returns 0 on success or a negative error from nouveau_gpuobj_new().
 */
static int
nva3_copy_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* zeroed on alloc and on free so no stale data leaks in or out */
	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	/* 0xc4/0xc8 hold the context's end (vinst + size - 1) and start
	 * addresses; the meaning of the 0xc0 magic and the zeroed words is
	 * presumably additional context-pointer state — not documented here */
	nv_wo32(ramin, 0xc0, 0x00190000);
	nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
	nv_wo32(ramin, 0xc8, ctx->vinst);
	nv_wo32(ramin, 0xcc, 0x00000000);
	nv_wo32(ramin, 0xd0, 0x00000000);
	nv_wo32(ramin, 0xd4, 0x00000000);
	/* make the instance-memory writes visible to the hardware */
	dev_priv->engine.instmem.flush(dev);

	atomic_inc(&chan->vm->engref[engine]);
	chan->engctx[engine] = ctx;
	return 0;
}
65 
66 static int
nva3_copy_object_new(struct nouveau_channel * chan,int engine,u32 handle,u16 class)67 nva3_copy_object_new(struct nouveau_channel *chan, int engine,
68 		     u32 handle, u16 class)
69 {
70 	struct nouveau_gpuobj *ctx = chan->engctx[engine];
71 
72 	/* fuc engine doesn't need an object, our ramht code does.. */
73 	ctx->engine = 3;
74 	ctx->class  = class;
75 	return nouveau_ramht_insert(chan, handle, ctx);
76 }
77 
/* Destroy a channel's PCOPY context: detach it from the hardware (with
 * fifo access disabled around the update), clear the copy-engine slot
 * in the channel's instance block, and drop the context object.
 */
static void
nva3_copy_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	u32 inst;

	/* instance address of the channel, tagged with the "loaded" bit
	 * so it can be compared against the current/next channel regs */
	inst  = (chan->ramin->vinst >> 12);
	inst |= 0x40000000;

	/* disable fifo access */
	nv_wr32(dev, 0x104048, 0x00000000);
	/* mark channel as unloaded if it's currently active */
	if (nv_rd32(dev, 0x104050) == inst)
		nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
	/* mark next channel as invalid if it's about to be loaded */
	if (nv_rd32(dev, 0x104054) == inst)
		nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
	/* restore fifo access */
	nv_wr32(dev, 0x104048, 0x00000003);

	/* zero the 0xc0..0xd4 slot written by nva3_copy_context_new()
	 * (inst is reused here as a plain byte offset) */
	for (inst = 0xc0; inst <= 0xd4; inst += 4)
		nv_wo32(chan->ramin, inst, 0x00000000);

	nouveau_gpuobj_ref(NULL, &ctx);

	atomic_dec(&chan->vm->engref[engine]);
	/* NOTE(review): this relies on nouveau_gpuobj_ref(NULL, &ctx) having
	 * set ctx to NULL, i.e. the slot is being cleared — confirm */
	chan->engctx[engine] = ctx;
}
107 
/* Flush the VM TLB for the copy engine (engine id 0x0d on NV50-family
 * hardware); thin wrapper used as the exec-engine tlb_flush hook. */
static void
nva3_copy_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 0x0d);
}
113 
/* Bring up the PCOPY engine: reset it, upload the fuc data and code
 * segments from nva3_copy.fuc.h, and start the microcontroller.
 * Always returns 0.
 */
static int
nva3_copy_init(struct drm_device *dev, int engine)
{
	int i;

	/* pulse the engine's enable bit in the master control register
	 * (clear then set bit 13) to reset it */
	nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
	nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
	nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */

	/* upload ucode */
	nv_wr32(dev, 0x1041c0, 0x01000000);
	/* data segment: streamed one 32-bit word at a time */
	for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
		nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);

	nv_wr32(dev, 0x104180, 0x01000000);
	/* code segment: the upload window is 64 words (256 bytes); the
	 * block index register must be rewritten at each block boundary */
	for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, 0x104188, i >> 6);
		nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
	}

	/* start it running */
	nv_wr32(dev, 0x10410c, 0x00000000);
	nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
	nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
	return 0;
}
141 
/* Shut the PCOPY engine down (for teardown or suspend): block fifo
 * access, ask the fuc to unload its current context, and mask all
 * interrupts.  Always returns 0.
 *
 * NOTE(review): the nv_wait() results are ignored, so a hung fuc is
 * silently tolerated — presumably intentional best-effort; confirm.
 */
static int
nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_mask(dev, 0x104048, 0x00000003, 0x00000000);

	/* trigger fuc context unload */
	nv_wait(dev, 0x104008, 0x0000000c, 0x00000000);
	nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
	nv_wr32(dev, 0x104000, 0x00000008);
	nv_wait(dev, 0x104008, 0x00000008, 0x00000000);

	/* mask all interrupts */
	nv_wr32(dev, 0x104014, 0xffffffff);
	return 0;
}
156 
/* Human-readable names for the DISPATCH_ERROR status codes reported in
 * the low half of register 0x104040; printed by nva3_copy_isr(). */
static struct nouveau_enum nva3_copy_isr_error_name[] = {
	{ 0x0001, "ILLEGAL_MTHD" },
	{ 0x0002, "INVALID_ENUM" },
	{ 0x0003, "INVALID_BITFIELD" },
	{}
};
163 
/* PCOPY interrupt handler: decodes and reports dispatch errors, logs
 * and acknowledges anything else, then triggers a VM fault report.
 */
static void
nva3_copy_isr(struct drm_device *dev)
{
	u32 dispatch = nv_rd32(dev, 0x10401c);
	/* pending interrupts that are enabled and not routed elsewhere */
	u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
	u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
	/* 0x104040: error code in the low 16 bits, faulting address above */
	u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
	u32 addr = nv_rd32(dev, 0x104040) >> 16;
	u32 mthd = (addr & 0x07ff) << 2;  /* method offset, in bytes */
	u32 subc = (addr & 0x3800) >> 11; /* subchannel */
	u32 data = nv_rd32(dev, 0x104044);
	int chid = nv50_graph_isr_chid(dev, inst);

	if (stat & 0x00000040) {
		NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
		nouveau_enum_print(nva3_copy_isr_error_name, ssta);
		printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
			chid, inst, subc, mthd, data);
		/* ack the dispatch-error bit */
		nv_wr32(dev, 0x104004, 0x00000040);
		stat &= ~0x00000040;
	}

	if (stat) {
		NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
		nv_wr32(dev, 0x104004, stat);
	}
	nv50_fb_vm_trap(dev, 1);
}
192 
193 static void
nva3_copy_destroy(struct drm_device * dev,int engine)194 nva3_copy_destroy(struct drm_device *dev, int engine)
195 {
196 	struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
197 
198 	nouveau_irq_unregister(dev, 22);
199 
200 	NVOBJ_ENGINE_DEL(dev, COPY0);
201 	kfree(pcopy);
202 }
203 
204 int
nva3_copy_create(struct drm_device * dev)205 nva3_copy_create(struct drm_device *dev)
206 {
207 	struct nva3_copy_engine *pcopy;
208 
209 	pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
210 	if (!pcopy)
211 		return -ENOMEM;
212 
213 	pcopy->base.destroy = nva3_copy_destroy;
214 	pcopy->base.init = nva3_copy_init;
215 	pcopy->base.fini = nva3_copy_fini;
216 	pcopy->base.context_new = nva3_copy_context_new;
217 	pcopy->base.context_del = nva3_copy_context_del;
218 	pcopy->base.object_new = nva3_copy_object_new;
219 	pcopy->base.tlb_flush = nva3_copy_tlb_flush;
220 
221 	nouveau_irq_register(dev, 22, nva3_copy_isr);
222 
223 	NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
224 	NVOBJ_CLASS(dev, 0x85b5, COPY0);
225 	return 0;
226 }
227