/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
#include "runl.h"
#include "priv.h"

#include <core/ramht.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

#include <nvif/if0020.h>

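/* Channel events (NVKM_CHAN_EVENT_ERRORED) need no custom init/fini handling. */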
const struct nvkm_event_func
nvkm_chan_event = {
};

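/* Bind (cctx != NULL) or unbind a channel's context on an engine. */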
void
nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx *cctx)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;
	struct nvkm_engine *engine = engn->engine;

	if (!engn->func->bind)
		return;

	CHAN_TRACE(chan, "%sbind cctx %d[%s]", cctx ? "" : "un", engn->id, engine->subdev.name);

	/* Prevent any channel in the channel group from being rescheduled, and kick
	 * them off the host and any engine(s) they're loaded on.
	 */
	if (cgrp->hw)
		nvkm_runl_block(runl);
	else
		nvkm_chan_block(chan);
	nvkm_chan_preempt(chan, true);

	/* Update context pointer. */
	engn->func->bind(engn, cctx, chan);

	/* Resume normal operation. */
	if (cgrp->hw)
		nvkm_runl_allow(runl);
	else
		nvkm_chan_allow(chan);
}

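/* Drop a reference to a channel context.  The final reference unlinks and frees
 * the context, and releases its channel group sub-context.
 */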
void
nvkm_chan_cctx_put(struct nvkm_chan *chan, struct nvkm_cctx **pcctx)
{
	struct nvkm_cctx *cctx = *pcctx;

	if (cctx) {
		struct nvkm_engn *engn = cctx->vctx->ectx->engn;

		if (refcount_dec_and_mutex_lock(&cctx->refs, &chan->cgrp->mutex)) {
			CHAN_TRACE(chan, "dtor cctx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_cgrp_vctx_put(chan->cgrp, &cctx->vctx);
			list_del(&cctx->head);
			kfree(cctx);
			mutex_unlock(&chan->cgrp->mutex);
		}

		*pcctx = NULL;
	}
}

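/* Take a reference on the channel context for an engine, creating it (along
 * with its channel group sub-context) if it doesn't already exist.
 */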
int
nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx **pcctx,
		   struct nvkm_client *client)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_vctx *vctx;
	struct nvkm_cctx *cctx;
	int ret;

	/* Look for an existing channel context for this engine+VEID. */
	mutex_lock(&cgrp->mutex);
	cctx = nvkm_list_find(cctx, &chan->cctxs, head,
			      cctx->vctx->ectx->engn == engn && cctx->vctx->vmm == chan->vmm);
	if (cctx) {
		refcount_inc(&cctx->refs);
		*pcctx = cctx;
		mutex_unlock(&cgrp->mutex);
		return 0;
	}

	/* None found - create a fresh one.  But, sub-context first. */
	ret = nvkm_cgrp_vctx_get(cgrp, engn, chan, &vctx, client);
	if (ret) {
		CHAN_ERROR(chan, "vctx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
		goto done;
	}

	/* Now create the channel context - to track engine binding. */
	CHAN_TRACE(chan, "ctor cctx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(cctx = *pcctx = kzalloc(sizeof(*cctx), GFP_KERNEL))) {
		nvkm_cgrp_vctx_put(cgrp, &vctx);
		ret = -ENOMEM;
		goto done;
	}

	cctx->vctx = vctx;
	refcount_set(&cctx->refs, 1);
	refcount_set(&cctx->uses, 0);
	list_add_tail(&cctx->head, &chan->cctxs);
done:
	mutex_unlock(&cgrp->mutex);
	return ret;
}

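/* Preempt a channel off the GPU, optionally waiting for the preemption to
 * complete.  Caller must hold the runlist mutex.
 */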
int
nvkm_chan_preempt_locked(struct nvkm_chan *chan, bool wait)
{
	struct nvkm_runl *runl = chan->cgrp->runl;

	CHAN_TRACE(chan, "preempt");
	chan->func->preempt(chan);
	if (!wait)
		return 0;

	return nvkm_runl_preempt_wait(runl);
}

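/* As above, but takes the runlist mutex.  A no-op on channel classes without
 * a preempt method.
 */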
int
nvkm_chan_preempt(struct nvkm_chan *chan, bool wait)
{
	int ret;

	if (!chan->func->preempt)
		return 0;

	mutex_lock(&chan->cgrp->runl->mutex);
	ret = nvkm_chan_preempt_locked(chan, wait);
	mutex_unlock(&chan->cgrp->runl->mutex);
	return ret;
}

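/* Remove a channel from its runlist, dropping the channel group too when its
 * last channel goes.  Caller must hold the runlist mutex.
 */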
void
nvkm_chan_remove_locked(struct nvkm_chan *chan)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;

	if (list_empty(&chan->head))
		return;

	CHAN_TRACE(chan, "remove");
	if (!--cgrp->chan_nr) {
		runl->cgrp_nr--;
		list_del(&cgrp->head);
	}
	runl->chan_nr--;
	list_del_init(&chan->head);
	atomic_set(&runl->changed, 1);
}

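/* Remove a channel from its runlist, optionally preempting it first, and
 * submit the updated runlist to HW.
 */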
void
nvkm_chan_remove(struct nvkm_chan *chan, bool preempt)
{
	struct nvkm_runl *runl = chan->cgrp->runl;

	mutex_lock(&runl->mutex);
	if (preempt && chan->func->preempt)
		nvkm_chan_preempt_locked(chan, true);
	nvkm_chan_remove_locked(chan);
	nvkm_runl_update_locked(runl, true);
	mutex_unlock(&runl->mutex);
}

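/* Add a channel (and, if it's the group's first, its channel group) to the
 * runlist, and submit the updated runlist to HW.
 */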
void
nvkm_chan_insert(struct nvkm_chan *chan)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;

	mutex_lock(&runl->mutex);
	if (WARN_ON(!list_empty(&chan->head))) {
		mutex_unlock(&runl->mutex);
		return;
	}

	CHAN_TRACE(chan, "insert");
	list_add_tail(&chan->head, &cgrp->chans);
	runl->chan_nr++;
	if (!cgrp->chan_nr++) {
		list_add_tail(&cgrp->head, &cgrp->runl->cgrps);
		runl->cgrp_nr++;
	}
	atomic_set(&runl->changed, 1);
	nvkm_runl_update_locked(runl, true);
	mutex_unlock(&runl->mutex);
}

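/* Increment the channel's blocked count, stopping it on the first block.
 * Caller must hold chan->lock.
 */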
static void
nvkm_chan_block_locked(struct nvkm_chan *chan)
{
	CHAN_TRACE(chan, "block %d", atomic_read(&chan->blocked));
	if (atomic_inc_return(&chan->blocked) == 1)
		chan->func->stop(chan);
}

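/* Mark a channel as errored on its first error: stop it, optionally preempt
 * it off the GPU, and notify listeners via the runlist's CHID event.
 */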
void
nvkm_chan_error(struct nvkm_chan *chan, bool preempt)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (atomic_inc_return(&chan->errored) == 1) {
		CHAN_ERROR(chan, "errored - disabling channel");
		nvkm_chan_block_locked(chan);
		if (preempt)
			chan->func->preempt(chan);
		nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED);
	}
	spin_unlock_irqrestore(&chan->lock, flags);
}

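/* Block a channel, stopping it if this is the first block. */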
void
nvkm_chan_block(struct nvkm_chan *chan)
{
	spin_lock_irq(&chan->lock);
	nvkm_chan_block_locked(chan);
	spin_unlock_irq(&chan->lock);
}

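/* Drop a block on a channel, restarting it once no blocks remain. */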
void
nvkm_chan_allow(struct nvkm_chan *chan)
{
	spin_lock_irq(&chan->lock);
	CHAN_TRACE(chan, "allow %d", atomic_read(&chan->blocked));
	if (atomic_dec_and_test(&chan->blocked))
		chan->func->start(chan);
	spin_unlock_irq(&chan->lock);
}

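/* Destroy a channel, releasing its RAMFC, instance objects, channel ID, USERD
 * and VMM references.
 */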
void
nvkm_chan_del(struct nvkm_chan **pchan)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	if (chan->func->ramfc->clear)
		chan->func->ramfc->clear(chan);

	nvkm_ramht_del(&chan->ramht);
	nvkm_gpuobj_del(&chan->pgd);
	nvkm_gpuobj_del(&chan->eng);
	nvkm_gpuobj_del(&chan->cache);
	nvkm_gpuobj_del(&chan->ramfc);

	if (chan->cgrp) {
		nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
		nvkm_cgrp_unref(&chan->cgrp);
	}

	nvkm_memory_unref(&chan->userd.mem);

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst->memory);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	kfree(chan);
}

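/* Release a channel returned by nvkm_chan_get_inst()/nvkm_chan_get_chid(),
 * dropping the channel group spinlock taken by the lookup.
 */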
void
nvkm_chan_put(struct nvkm_chan **pchan, unsigned long irqflags)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	*pchan = NULL;
	spin_unlock_irqrestore(&chan->cgrp->lock, irqflags);
}

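/* Look up the channel with the given instance block address on any runlist
 * servicing the engine (or on all runlists, for the FIFO engine itself).
 * Returns with the channel group spinlock held; release with nvkm_chan_put().
 */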
struct nvkm_chan *
nvkm_chan_get_inst(struct nvkm_engine *engine, u64 inst, unsigned long *pirqflags)
{
	struct nvkm_fifo *fifo = engine->subdev.device->fifo;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	struct nvkm_chan *chan;

	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn(engn, runl) {
			if (engine == &fifo->engine || engn->engine == engine) {
				chan = nvkm_runl_chan_get_inst(runl, inst, pirqflags);
				if (chan || engn->engine == engine)
					return chan;
			}
		}
	}

	return NULL;
}

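/* As nvkm_chan_get_inst(), but look the channel up by its channel ID. */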
struct nvkm_chan *
nvkm_chan_get_chid(struct nvkm_engine *engine, int id, unsigned long *pirqflags)
{
	struct nvkm_fifo *fifo = engine->subdev.device->fifo;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;

	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn(engn, runl) {
			if (fifo->chid || engn->engine == engine)
				return nvkm_runl_chan_get_chid(runl, id, pirqflags);
		}
	}

	return NULL;
}

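/* Common channel constructor: validates the arguments against the class
 * description, joins (or creates) a channel group, then allocates the
 * instance block, VMM reference, push buffer ctxdma, channel ID and USERD,
 * and writes the initial RAMFC.
 */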
int
nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int runq,
	       struct nvkm_cgrp *cgrp, const char *name, bool priv, u32 devm, struct nvkm_vmm *vmm,
	       struct nvkm_dmaobj *dmaobj, u64 offset, u64 length,
	       struct nvkm_memory *userd, u64 ouserd, struct nvkm_chan **pchan)
{
	struct nvkm_fifo *fifo = runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_chan *chan;
	int ret;

	/* Validate arguments against class requirements. */
	if ((runq && runq >= runl->func->runqs) ||
	    (!func->inst->vmm != !vmm) ||
	    (!func->userd->bar == !userd) ||
	    (!func->ramfc->ctxdma != !dmaobj) ||
	    ((func->ramfc->devm < devm) && devm != BIT(0)) ||
	    (!func->ramfc->priv && priv)) {
		RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
				 "push:%d:%p devm:%08x:%08x priv:%d:%d",
			   runl->func->runqs, runq, func->inst->vmm, vmm,
			   func->userd->bar, userd, func->ramfc->ctxdma, dmaobj,
			   func->ramfc->devm, devm, func->ramfc->priv, priv);
		return -EINVAL;
	}

	if (!(chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;

	chan->func = func;
	strscpy(chan->name, name, sizeof(chan->name));
	chan->runq = runq;
	chan->id = -1;
	spin_lock_init(&chan->lock);
	atomic_set(&chan->blocked, 1);
	atomic_set(&chan->errored, 0);
	INIT_LIST_HEAD(&chan->cctxs);
	INIT_LIST_HEAD(&chan->head);

	/* Join channel group.
	 *
	 * GK110 and newer support channel groups (aka TSGs), where individual channels
	 * share a timeslice and engine context(s).
	 *
	 * As such, engine contexts are tracked in nvkm_cgrp and we need them even when
	 * channels aren't in an API channel group, and on HW that doesn't support TSGs.
	 */
	if (!cgrp) {
		ret = nvkm_cgrp_new(runl, chan->name, vmm, fifo->func->cgrp.force, &chan->cgrp);
		if (ret) {
			RUNL_DEBUG(runl, "cgrp %d", ret);
			return ret;
		}

		cgrp = chan->cgrp;
	} else {
		if (cgrp->runl != runl || cgrp->vmm != vmm) {
			RUNL_DEBUG(runl, "cgrp %d %d", cgrp->runl != runl, cgrp->vmm != vmm);
			return -EINVAL;
		}

		chan->cgrp = nvkm_cgrp_ref(cgrp);
	}

	/* Allocate instance block. */
	ret = nvkm_gpuobj_new(device, func->inst->size, 0x1000, func->inst->zero, NULL,
			      &chan->inst);
	if (ret) {
		RUNL_DEBUG(runl, "inst %d", ret);
		return ret;
	}

	/* Initialise virtual address-space. */
	if (func->inst->vmm) {
		if (WARN_ON(vmm->mmu != device->mmu))
			return -EINVAL;

		ret = nvkm_vmm_join(vmm, chan->inst->memory);
		if (ret) {
			RUNL_DEBUG(runl, "vmm %d", ret);
			return ret;
		}

		chan->vmm = nvkm_vmm_ref(vmm);
	}

	/* Allocate HW ctxdma for push buffer. */
	if (func->ramfc->ctxdma) {
		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, &chan->push);
		if (ret) {
			RUNL_DEBUG(runl, "bind %d", ret);
			return ret;
		}
	}

	/* Allocate channel ID. */
	chan->id = nvkm_chid_get(runl->chid, chan);
	if (chan->id >= 0) {
		if (!func->userd->bar) {
			if (ouserd + chan->func->userd->size >=
			    nvkm_memory_size(userd)) {
				RUNL_DEBUG(runl, "ouserd %llx", ouserd);
				return -EINVAL;
			}

			ret = nvkm_memory_kmap(userd, &chan->userd.mem);
			if (ret) {
				RUNL_DEBUG(runl, "userd %d", ret);
				return ret;
			}

			chan->userd.base = ouserd;
		} else {
			chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
			chan->userd.base = chan->id * chan->func->userd->size;
		}
	}

	if (chan->id < 0) {
		RUNL_ERROR(runl, "!chids");
		return -ENOSPC;
	}

	if (cgrp->id < 0)
		cgrp->id = chan->id;

	/* Initialise USERD. */
	if (chan->func->userd->clear)
		chan->func->userd->clear(chan);

	/* Initialise RAMFC. */
	ret = chan->func->ramfc->write(chan, offset, length, devm, priv);
	if (ret) {
		RUNL_DEBUG(runl, "ramfc %d", ret);
		return ret;
	}

	return 0;
}