Lines matching full:fifo in nvkm/engine/fifo/gk104.c

52 gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,  in gk104_fifo_engine_status()  argument
55 struct nvkm_engine *engine = fifo->engine[engn].engine; in gk104_fifo_engine_status()
56 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_engine_status()
102 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_class_new() local
103 if (oclass->engn == &fifo->func->chan) { in gk104_fifo_class_new()
105 return user->ctor(fifo, oclass, argv, argc, pobject); in gk104_fifo_class_new()
107 if (oclass->engn == &fifo->func->user) { in gk104_fifo_class_new()
119 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_class_get() local
122 if (fifo->func->user.ctor && c++ == index) { in gk104_fifo_class_get()
123 oclass->base = fifo->func->user.user; in gk104_fifo_class_get()
124 oclass->engn = &fifo->func->user; in gk104_fifo_class_get()
128 if (fifo->func->chan.ctor && c++ == index) { in gk104_fifo_class_get()
129 oclass->base = fifo->func->chan.user; in gk104_fifo_class_get()
130 oclass->engn = &fifo->func->chan; in gk104_fifo_class_get()
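
The matches at lines 102-130 show nvkm's class-enumeration handshake: gk104_fifo_class_get() is called with increasing index values and uses a running counter (the c++ == index tests at lines 122 and 128) to hand out each available class along with a cookie in oclass->engn, and gk104_fifo_class_new() later compares that cookie against &fifo->func->chan or &fifo->func->user to route to the matching constructor. A small stand-alone sketch of the counter idiom, with hypothetical names:

    /* Sketch of the c++ == index enumeration idiom from
     * gk104_fifo_class_get(): each optional class bumps a counter,
     * and the entry whose counter matches the requested index wins.
     * All names here are illustrative, not the driver's own. */
    #include <stdio.h>

    struct demo_class { const char *name; };

    static const struct demo_class user_cls = { "user" };
    static const struct demo_class chan_cls = { "chan" };

    static int demo_class_get(int index, int have_user, int have_chan,
                              const struct demo_class **out)
    {
        int c = 0;

        if (have_user && c++ == index) { *out = &user_cls; return 0; }
        if (have_chan && c++ == index) { *out = &chan_cls; return 0; }
        return c; /* total class count: signals "no more classes" */
    }

    int main(void)
    {
        const struct demo_class *cls;
        for (int i = 0; demo_class_get(i, 1, 1, &cls) == 0; i++)
            printf("class %d: %s\n", i, cls->name);
        return 0;
    }
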
138 gk104_fifo_uevent_fini(struct nvkm_fifo *fifo) in gk104_fifo_uevent_fini() argument
140 struct nvkm_device *device = fifo->engine.subdev.device; in gk104_fifo_uevent_fini()
145 gk104_fifo_uevent_init(struct nvkm_fifo *fifo) in gk104_fifo_uevent_init() argument
147 struct nvkm_device *device = fifo->engine.subdev.device; in gk104_fifo_uevent_init()
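
Both uevent hooks reduce to a single write to the PFIFO interrupt-enable register, done with nvkm's read-modify-write helper nvkm_mask(). The register offset and bit used below (0x002140, bit 31) are my assumption for this family and are not visible in the matches. A sketch of the idiom:

    /* Read-modify-write in the style of nvkm_mask(): keep the bits
     * outside `mask`, replace the bits inside it with `data`. regs[]
     * stands in for MMIO space; everything here is illustrative. */
    #include <stdint.h>

    static uint32_t regs[0x4000];

    static uint32_t demo_mask(uint32_t addr, uint32_t mask, uint32_t data)
    {
        uint32_t temp = regs[addr >> 2];
        regs[addr >> 2] = (temp & ~mask) | (data & mask);
        return temp;  /* nvkm_mask() also returns the prior value */
    }

    int main(void)
    {
        demo_mask(0x002140, 0x80000000, 0x80000000);  /* uevent_init */
        demo_mask(0x002140, 0x80000000, 0x00000000);  /* uevent_fini */
        return 0;
    }
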
152 gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl, in gk104_fifo_runlist_commit() argument
155 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_runlist_commit()
179 gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl) in gk104_fifo_runlist_update() argument
181 const struct gk104_fifo_runlist_func *func = fifo->func->runlist; in gk104_fifo_runlist_update()
183 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_runlist_update()
189 mem = fifo->runlist[runl].mem[fifo->runlist[runl].next]; in gk104_fifo_runlist_update()
190 fifo->runlist[runl].next = !fifo->runlist[runl].next; in gk104_fifo_runlist_update()
193 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { in gk104_fifo_runlist_update()
197 list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) { in gk104_fifo_runlist_update()
205 func->commit(fifo, runl, mem, nr); in gk104_fifo_runlist_update()
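
Lines 189-190 show the core trick of gk104_fifo_runlist_update(): each runlist owns two buffers, and every rebuild writes into the currently inactive one before flipping the next toggle, so the hardware keeps reading a complete list while the new one is built. Channels are emitted first (line 193), then channel groups (line 197), and func->commit() at line 205 points the chip at the fresh buffer. A minimal sketch of the double-buffer flip, assuming a made-up one-word entry format:

    /* Double-buffered runlist rebuild, modeled on
     * gk104_fifo_runlist_update(). The entry layout and all names
     * are simplified stand-ins, not the hardware format. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ENTRIES 8

    struct demo_runlist {
        uint32_t mem[2][MAX_ENTRIES]; /* two buffers: live + spare */
        int next;                     /* which buffer to build into */
    };

    static void demo_commit(uint32_t *mem, int nr)
    {
        /* in the driver this writes the buffer address and entry
         * count to PFIFO and waits for the update to take effect */
        for (int i = 0; i < nr; i++)
            printf("entry %d: chid %u\n", i, (unsigned)mem[i]);
    }

    static void demo_runlist_update(struct demo_runlist *rl,
                                    const uint32_t *chids, int nr)
    {
        uint32_t *mem = rl->mem[rl->next]; /* build into the spare */
        rl->next = !rl->next;              /* flip for next rebuild */

        for (int i = 0; i < nr; i++)
            mem[i] = chids[i];

        demo_commit(mem, nr); /* make the new list live */
    }

    int main(void)
    {
        struct demo_runlist rl = {0};
        uint32_t chids[] = { 3, 5, 7 };
        demo_runlist_update(&rl, chids, 3);
        return 0;
    }
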
210 gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) in gk104_fifo_runlist_remove() argument
213 mutex_lock(&fifo->base.engine.subdev.mutex); in gk104_fifo_runlist_remove()
219 mutex_unlock(&fifo->base.engine.subdev.mutex); in gk104_fifo_runlist_remove()
223 gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) in gk104_fifo_runlist_insert() argument
226 mutex_lock(&fifo->base.engine.subdev.mutex); in gk104_fifo_runlist_insert()
229 list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp); in gk104_fifo_runlist_insert()
232 list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan); in gk104_fifo_runlist_insert()
234 mutex_unlock(&fifo->base.engine.subdev.mutex); in gk104_fifo_runlist_insert()
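
Lines 226-234 show that runlist membership only ever changes under the subdev mutex, which is what lets gk104_fifo_runlist_update() walk the chan and cgrp lists safely. A channel group joins the runlist when its first channel is inserted and leaves when its last channel is removed; the chan_nr counter below is an assumption based on the surrounding driver code, not visible in the matches:

    /* Locking shape of gk104_fifo_runlist_insert()/_remove(): a
     * pthread mutex stands in for the subdev mutex, and a flag
     * stands in for actual list membership. Illustrative only. */
    #include <pthread.h>
    #include <stdio.h>

    struct demo_cgrp { int chan_nr; int on_runlist; };

    static pthread_mutex_t runlist_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void demo_insert(struct demo_cgrp *cgrp)
    {
        pthread_mutex_lock(&runlist_mutex);
        /* a group joins the runlist only when its first channel
         * arrives; later channels just bump the count */
        if (!cgrp->chan_nr++)
            cgrp->on_runlist = 1;  /* list_add_tail() in the driver */
        pthread_mutex_unlock(&runlist_mutex);
    }

    static void demo_remove(struct demo_cgrp *cgrp)
    {
        pthread_mutex_lock(&runlist_mutex);
        /* ...and leaves only when its last channel is removed */
        if (!--cgrp->chan_nr)
            cgrp->on_runlist = 0;  /* list_del_init() in the driver */
        pthread_mutex_unlock(&runlist_mutex);
    }

    int main(void)
    {
        struct demo_cgrp cgrp = { 0, 0 };
        demo_insert(&cgrp);
        demo_insert(&cgrp);  /* second channel: membership unchanged */
        demo_remove(&cgrp);
        demo_remove(&cgrp);  /* last channel out: group leaves */
        printf("chan_nr=%d on_runlist=%d\n", cgrp.chan_nr, cgrp.on_runlist);
        return 0;
    }
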
253 gk104_fifo_pbdma_init(struct gk104_fifo *fifo) in gk104_fifo_pbdma_init() argument
255 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_pbdma_init()
256 nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1); in gk104_fifo_pbdma_init()
260 gk104_fifo_pbdma_nr(struct gk104_fifo *fifo) in gk104_fifo_pbdma_nr() argument
262 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_pbdma_nr()
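
gk104_fifo_pbdma_init() (line 256) enables PBDMA units 0..nr-1 in one shot with the dense mask (1 << pbdma_nr) - 1. How pbdma_nr is obtained is not visible in the matches; on this generation the driver is commonly described as writing all-ones to the same enable register and counting the bits the hardware lets stick, which is what the sketch below assumes:

    /* The two bitmask idioms behind gk104_fifo_pbdma_init()/_nr().
     * reg_000204 models enable-register behaviour where bits for
     * nonexistent units read back as zero; illustrative only.
     * __builtin_popcount() stands in for the kernel's hweight32(). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hw_units = 0x7; /* pretend 3 PBDMAs exist */
    static uint32_t reg_000204;

    static void wr32(uint32_t v) { reg_000204 = v & hw_units; }
    static uint32_t rd32(void)   { return reg_000204; }

    int main(void)
    {
        /* probe: write all-ones, count surviving bits (pbdma_nr) */
        wr32(0xffffffff);
        int nr = __builtin_popcount(rd32());

        /* enable units 0..nr-1 with a dense mask (pbdma_init) */
        wr32((1u << nr) - 1);

        printf("pbdma_nr=%d enable=0x%x\n", nr, rd32());
        return 0;
    }
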
277 struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work); in gk104_fifo_recover_work() local
278 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_recover_work()
284 spin_lock_irqsave(&fifo->base.lock, flags); in gk104_fifo_recover_work()
285 runm = fifo->recover.runm; in gk104_fifo_recover_work()
286 engm = fifo->recover.engm; in gk104_fifo_recover_work()
287 fifo->recover.engm = 0; in gk104_fifo_recover_work()
288 fifo->recover.runm = 0; in gk104_fifo_recover_work()
289 spin_unlock_irqrestore(&fifo->base.lock, flags); in gk104_fifo_recover_work()
294 if ((engine = fifo->engine[engn].engine)) { in gk104_fifo_recover_work()
301 gk104_fifo_runlist_update(fifo, runl); in gk104_fifo_recover_work()
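
The recover worker at lines 277-301 follows a classic deferred-work shape: the interrupt path only sets bits in recover.runm/recover.engm under the fifo spinlock and schedules the work item, while the worker snapshots and clears both masks under the same lock (lines 284-289), then performs the slow engine resets and runlist rebuilds with the lock dropped. A minimal user-space sketch of that snapshot-and-clear handoff, with a pthread mutex standing in for the spinlock and all names hypothetical:

    /* Snapshot-and-clear handoff between an IRQ-side producer and
     * a work-side consumer, as in gk104_fifo_recover_work(). */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t fifo_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long pending_runm, pending_engm;

    /* interrupt side: record what needs recovery, return quickly */
    static void demo_mark(unsigned long runm, unsigned long engm)
    {
        pthread_mutex_lock(&fifo_lock);
        pending_runm |= runm;
        pending_engm |= engm;
        pthread_mutex_unlock(&fifo_lock);
        /* schedule_work(&fifo->recover.work) in the driver */
    }

    /* worker side: take the whole batch, then act without the lock */
    static void demo_recover_work(void)
    {
        unsigned long runm, engm;

        pthread_mutex_lock(&fifo_lock);
        runm = pending_runm;
        engm = pending_engm;
        pending_runm = pending_engm = 0;
        pthread_mutex_unlock(&fifo_lock);

        printf("resetting engines 0x%lx, rebuilding runlists 0x%lx\n",
               engm, runm);
    }

    int main(void)
    {
        demo_mark(0x1, 0x4);
        demo_recover_work();
        return 0;
    }
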
307 static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
310 gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl) in gk104_fifo_recover_runl() argument
312 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_recover_runl()
316 assert_spin_locked(&fifo->base.lock); in gk104_fifo_recover_runl()
317 if (fifo->recover.runm & runm) in gk104_fifo_recover_runl()
319 fifo->recover.runm |= runm; in gk104_fifo_recover_runl()
326 schedule_work(&fifo->recover.work); in gk104_fifo_recover_runl()
330 gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid) in gk104_fifo_recover_chid() argument
335 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { in gk104_fifo_recover_chid()
342 list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) { in gk104_fifo_recover_chid()
358 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_recover_chan() local
359 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_recover_chan()
364 unsigned long engn, engm = fifo->runlist[runl].engm; in gk104_fifo_recover_chan()
367 assert_spin_locked(&fifo->base.lock); in gk104_fifo_recover_chan()
372 chan = gk104_fifo_recover_chid(fifo, runl, chid); in gk104_fifo_recover_chan()
375 nvkm_fifo_kevent(&fifo->base, chid); in gk104_fifo_recover_chan()
383 gk104_fifo_recover_runl(fifo, runl); in gk104_fifo_recover_chan()
386 for_each_set_bit(engn, &engm, fifo->engine_nr) { in gk104_fifo_recover_chan()
388 gk104_fifo_engine_status(fifo, engn, &status); in gk104_fifo_recover_chan()
391 gk104_fifo_recover_engn(fifo, engn); in gk104_fifo_recover_chan()
396 gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn) in gk104_fifo_recover_engn() argument
398 struct nvkm_engine *engine = fifo->engine[engn].engine; in gk104_fifo_recover_engn()
399 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_recover_engn()
401 const u32 runl = fifo->engine[engn].runl; in gk104_fifo_recover_engn()
406 assert_spin_locked(&fifo->base.lock); in gk104_fifo_recover_engn()
407 if (fifo->recover.engm & engm) in gk104_fifo_recover_engn()
409 fifo->recover.engm |= engm; in gk104_fifo_recover_engn()
412 gk104_fifo_recover_runl(fifo, runl); in gk104_fifo_recover_engn()
415 gk104_fifo_engine_status(fifo, engn, &status); in gk104_fifo_recover_engn()
418 gk104_fifo_recover_chan(&fifo->base, status.chan->id); in gk104_fifo_recover_engn()
427 const struct nvkm_enum *en = fifo->func->fault.engine; in gk104_fifo_recover_engn()
448 gk104_fifo_engine_status(fifo, engn, &status); in gk104_fifo_recover_engn()
461 schedule_work(&fifo->recover.work); in gk104_fifo_recover_engn()
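
gk104_fifo_recover_chan() and gk104_fifo_recover_engn() call into each other (hence the forward declaration at line 307), and recover_engn() also pulls in its runlist. Each entry point therefore opens with the same guard: if the unit's bit is already set in the recover mask (lines 317 and 407), bail out; otherwise set it and continue. That bounds the recursion and makes repeat faults on an already-queued unit cheap. A sketch of the guard:

    /* The recover-mask guards from gk104_fifo_recover_runl()/_engn():
     * each unit is queued for recovery at most once, which bounds
     * the recursion between the recover_* functions. Names and the
     * engine-to-runlist mapping are illustrative. */
    #include <stdio.h>

    static unsigned long recover_runm, recover_engm;

    static void recover_runl(int runl)
    {
        unsigned long runm = 1ul << runl;

        if (recover_runm & runm)  /* already queued: nothing to do */
            return;
        recover_runm |= runm;
        printf("blocking runlist %d\n", runl);
        /* schedule_work(&fifo->recover.work) in the driver */
    }

    static void recover_engn(int engn)
    {
        unsigned long engm = 1ul << engn;

        if (recover_engm & engm)
            return;
        recover_engm |= engm;

        recover_runl(engn);  /* pretend engine n feeds runlist n */
        printf("halting engine %d\n", engn);
    }

    int main(void)
    {
        recover_engn(2);
        recover_engn(2);  /* second fault on the same engine: no-op */
        return 0;
    }
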
467 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_fault() local
468 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_fault()
477 er = nvkm_enum_find(fifo->func->fault.reason, info->reason); in gk104_fifo_fault()
478 ee = nvkm_enum_find(fifo->func->fault.engine, info->engine); in gk104_fifo_fault()
480 ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client); in gk104_fifo_fault()
482 ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client); in gk104_fifo_fault()
485 ea = nvkm_enum_find(fifo->func->fault.access, info->access); in gk104_fifo_fault()
518 spin_lock_irqsave(&fifo->base.lock, flags); in gk104_fifo_fault()
519 chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst); in gk104_fifo_fault()
532 gk104_fifo_recover_chan(&fifo->base, chan->chid); in gk104_fifo_fault()
538 for (engn = 0; engn < fifo->engine_nr && engine; engn++) { in gk104_fifo_fault()
539 if (fifo->engine[engn].engine == engine) { in gk104_fifo_fault()
540 gk104_fifo_recover_engn(fifo, engn); in gk104_fifo_fault()
545 spin_unlock_irqrestore(&fifo->base.lock, flags); in gk104_fifo_fault()
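
gk104_fifo_fault() (lines 467-545) decodes the raw fault fields by looking each one up in per-chipset tables via nvkm_enum_find(): reason, engine, hub or GPC client, and access type. It then locates the channel that owns the faulting instance under the fifo lock and feeds it to gk104_fifo_recover_chan(). A sketch of the table-lookup style, with made-up table contents:

    /* nvkm_enum_find()-style decode: raw hardware field values are
     * matched against a table of {value, name} pairs. The table
     * below is invented for illustration, not the driver's data. */
    #include <stdio.h>

    struct demo_enum { int value; const char *name; };

    static const struct demo_enum demo_reason[] = {
        { 0x00, "PDE" }, { 0x02, "PTE" }, { -1, NULL },
    };

    static const char *demo_find(const struct demo_enum *en, int value)
    {
        for (; en->name; en++)
            if (en->value == value)
                return en->name;
        return "unknown";
    }

    int main(void)
    {
        printf("fault reason: %s\n", demo_find(demo_reason, 0x02));
        return 0;
    }
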
560 gk104_fifo_intr_bind(struct gk104_fifo *fifo) in gk104_fifo_intr_bind() argument
562 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_bind()
579 gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) in gk104_fifo_intr_sched_ctxsw() argument
581 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_intr_sched_ctxsw()
588 spin_lock_irqsave(&fifo->base.lock, flags); in gk104_fifo_intr_sched_ctxsw()
592 for (engn = 0; engn < fifo->engine_nr; engn++) { in gk104_fifo_intr_sched_ctxsw()
595 gk104_fifo_engine_status(fifo, engn, &status); in gk104_fifo_intr_sched_ctxsw()
602 for_each_set_bit(engn, &engm, fifo->engine_nr) in gk104_fifo_intr_sched_ctxsw()
603 gk104_fifo_recover_engn(fifo, engn); in gk104_fifo_intr_sched_ctxsw()
606 spin_unlock_irqrestore(&fifo->base.lock, flags); in gk104_fifo_intr_sched_ctxsw()
610 gk104_fifo_intr_sched(struct gk104_fifo *fifo) in gk104_fifo_intr_sched() argument
612 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_sched()
623 gk104_fifo_intr_sched_ctxsw(fifo); in gk104_fifo_intr_sched()
631 gk104_fifo_intr_chsw(struct gk104_fifo *fifo) in gk104_fifo_intr_chsw() argument
633 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_chsw()
641 gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo) in gk104_fifo_intr_dropped_fault() argument
643 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_dropped_fault()
684 gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) in gk104_fifo_intr_pbdma_0() argument
686 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_pbdma_0()
711 chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); in gk104_fifo_intr_pbdma_0()
717 nvkm_fifo_chan_put(&fifo->base, flags, &chan); in gk104_fifo_intr_pbdma_0()
733 gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit) in gk104_fifo_intr_pbdma_1() argument
735 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_pbdma_1()
754 gk104_fifo_intr_runlist(struct gk104_fifo *fifo) in gk104_fifo_intr_runlist() argument
756 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_intr_runlist()
760 wake_up(&fifo->runlist[runl].wait); in gk104_fifo_intr_runlist()
767 gk104_fifo_intr_engine(struct gk104_fifo *fifo) in gk104_fifo_intr_engine() argument
769 nvkm_fifo_uevent(&fifo->base); in gk104_fifo_intr_engine()
775 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_intr() local
776 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr()
782 gk104_fifo_intr_bind(fifo); in gk104_fifo_intr()
794 gk104_fifo_intr_sched(fifo); in gk104_fifo_intr()
800 gk104_fifo_intr_chsw(fifo); in gk104_fifo_intr()
818 gk104_fifo_intr_dropped_fault(fifo); in gk104_fifo_intr()
827 fifo->func->intr.fault(&fifo->base, unit); in gk104_fifo_intr()
838 gk104_fifo_intr_pbdma_0(fifo, unit); in gk104_fifo_intr()
839 gk104_fifo_intr_pbdma_1(fifo, unit); in gk104_fifo_intr()
847 gk104_fifo_intr_runlist(fifo); in gk104_fifo_intr()
853 gk104_fifo_intr_engine(fifo); in gk104_fifo_intr()
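
gk104_fifo_intr() (lines 775-853) is a standard bit-dispatch interrupt handler: the pending status is masked with the enabled set, each recognized bit is routed to its helper (bind, sched, chsw, dropped fault, MMU fault, PBDMA, runlist, engine) and cleared, and whatever survives is reported as unhandled. A compact sketch of the shape, with placeholder register values and bit assignments:

    /* Bit-dispatch interrupt handler in the shape of
     * gk104_fifo_intr(). Bits and values below are placeholders. */
    #include <stdint.h>
    #include <stdio.h>

    #define INTR_BIND    0x00000001u
    #define INTR_SCHED   0x00000100u
    #define INTR_RUNLIST 0x40000000u

    int main(void)
    {
        uint32_t mask = 0x40000101;        /* enabled interrupts */
        uint32_t stat = 0x40000100 & mask; /* pending & enabled */

        if (stat & INTR_BIND)    { puts("bind error");   stat &= ~INTR_BIND; }
        if (stat & INTR_SCHED)   { puts("sched error");  stat &= ~INTR_SCHED; }
        if (stat & INTR_RUNLIST) { puts("runlist done"); stat &= ~INTR_RUNLIST; }

        if (stat) /* anything left is unexpected: log and mask off */
            printf("unhandled intr 0x%08x\n", stat);
        return 0;
    }
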
867 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_fini() local
868 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_fini()
869 flush_work(&fifo->recover.work); in gk104_fifo_fini()
870 /* allow mmu fault interrupts, even when we're not using fifo */ in gk104_fifo_fini()
877 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_info() local
880 *data = (1ULL << fifo->runlist_nr) - 1; in gk104_fifo_info()
885 if (runl < fifo->runlist_nr) { in gk104_fifo_info()
886 unsigned long engm = fifo->runlist[runl].engm; in gk104_fifo_info()
889 for_each_set_bit(engn, &engm, fifo->engine_nr) { in gk104_fifo_info()
890 if ((engine = fifo->engine[engn].engine)) in gk104_fifo_info()
905 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_oneinit() local
906 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_oneinit()
913 fifo->pbdma_nr = fifo->func->pbdma->nr(fifo); in gk104_fifo_oneinit()
914 nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr); in gk104_fifo_oneinit()
917 if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL))) in gk104_fifo_oneinit()
920 for (i = 0; i < fifo->pbdma_nr; i++) in gk104_fifo_oneinit()
927 for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) { in gk104_fifo_oneinit()
937 fifo->engine[engn].engine = nvkm_device_engine(device, engidx); in gk104_fifo_oneinit()
938 fifo->engine[engn].runl = runl; in gk104_fifo_oneinit()
939 fifo->engine[engn].pbid = pbid; in gk104_fifo_oneinit()
940 fifo->engine_nr = max(fifo->engine_nr, engn + 1); in gk104_fifo_oneinit()
941 fifo->runlist[runl].engm |= 1 << engn; in gk104_fifo_oneinit()
942 fifo->runlist_nr = max(fifo->runlist_nr, runl + 1); in gk104_fifo_oneinit()
947 for (i = 0; i < fifo->runlist_nr; i++) { in gk104_fifo_oneinit()
948 for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) { in gk104_fifo_oneinit()
950 fifo->base.nr * 2/* TSG+chan */ * in gk104_fifo_oneinit()
951 fifo->func->runlist->size, in gk104_fifo_oneinit()
953 &fifo->runlist[i].mem[j]); in gk104_fifo_oneinit()
958 init_waitqueue_head(&fifo->runlist[i].wait); in gk104_fifo_oneinit()
959 INIT_LIST_HEAD(&fifo->runlist[i].cgrp); in gk104_fifo_oneinit()
960 INIT_LIST_HEAD(&fifo->runlist[i].chan); in gk104_fifo_oneinit()
964 fifo->base.nr * 0x200, 0x1000, true, in gk104_fifo_oneinit()
965 &fifo->user.mem); in gk104_fifo_oneinit()
969 ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem), in gk104_fifo_oneinit()
970 &fifo->user.bar); in gk104_fifo_oneinit()
974 return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0); in gk104_fifo_oneinit()
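
Lines 947-974 show the one-time setup: every runlist gets two buffers (the pair flipped by runlist_update() above), each sized for one TSG entry plus one channel entry per channel, and a USERD area of 0x200 bytes per channel is allocated and mapped through BAR1 so each channel's control page is directly reachable. A sketch of the sizing arithmetic; the runlist entry size below is an example value, the real one comes from fifo->func->runlist->size:

    /* Sizing sketch for gk104_fifo_oneinit()'s allocations, using
     * the constants visible in the listing: 2 entries per channel
     * (TSG + chan) per runlist buffer, 0x200 bytes USERD/channel. */
    #include <stdio.h>

    int main(void)
    {
        int nr_chan = 4096;    /* fifo->base.nr */
        int entry_size = 8;    /* fifo->func->runlist->size, e.g. */

        size_t runlist_bytes = (size_t)nr_chan * 2 * entry_size;
        size_t userd_bytes   = (size_t)nr_chan * 0x200;

        printf("per-runlist buffer: %zu bytes (x2 buffers)\n",
               runlist_bytes);
        printf("USERD: %zu bytes, 0x1000 aligned, BAR1 mapped\n",
               userd_bytes);
        return 0;
    }
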
980 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_init() local
981 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_init()
985 fifo->func->pbdma->init(fifo); in gk104_fifo_init()
988 for (i = 0; i < fifo->pbdma_nr; i++) { in gk104_fifo_init()
995 for (i = 0; i < fifo->pbdma_nr; i++) { in gk104_fifo_init()
1000 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12); in gk104_fifo_init()
1002 if (fifo->func->pbdma->init_timeout) in gk104_fifo_init()
1003 fifo->func->pbdma->init_timeout(fifo); in gk104_fifo_init()
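
Line 1000 programs the USERD base: the BAR1 address is shifted down into 4 KiB units and OR'd with 0x10000000 before being written to 0x002254. Reading the high bit as an enable/valid flag is my interpretation, not confirmed by the listing. The packing itself:

    /* Packing the USERD base register as in gk104_fifo_init()'s
     * write to 0x002254: flag bit | (address in 4 KiB pages). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bar_addr = 0x0000000040000000ull; /* example VA */
        uint32_t reg = 0x10000000u | (uint32_t)(bar_addr >> 12);

        printf("0x002254 <- 0x%08x\n", reg);
        return 0;
    }
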
1012 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_dtor() local
1013 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_dtor()
1016 nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar); in gk104_fifo_dtor()
1017 nvkm_memory_unref(&fifo->user.mem); in gk104_fifo_dtor()
1019 for (i = 0; i < fifo->runlist_nr; i++) { in gk104_fifo_dtor()
1020 nvkm_memory_unref(&fifo->runlist[i].mem[1]); in gk104_fifo_dtor()
1021 nvkm_memory_unref(&fifo->runlist[i].mem[0]); in gk104_fifo_dtor()
1024 return fifo; in gk104_fifo_dtor()
1047 struct gk104_fifo *fifo; in gk104_fifo_new_() local
1049 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) in gk104_fifo_new_()
1051 fifo->func = func; in gk104_fifo_new_()
1052 INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work); in gk104_fifo_new_()
1053 *pfifo = &fifo->base; in gk104_fifo_new_()
1055 return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base); in gk104_fifo_new_()
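
gk104_fifo_new_() (lines 1047-1055) is nvkm's usual constructor shape: allocate the chip-specific structure, set its func pointer, initialize the recovery work item, publish the embedded base object through *pfifo, and let nvkm_fifo_ctor() finish construction. The gk104_fifo(base) casts seen throughout the file are the matching container_of() downcasts. A stand-alone sketch of the embedded-base pattern, all names hypothetical:

    /* The embedded-base-object pattern behind gk104_fifo_new_()
     * and the gk104_fifo() downcast. */
    #include <stddef.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct base_fifo { int nr; };

    struct gk104_like_fifo {
        struct base_fifo base;  /* embedded base object */
        int pbdma_nr;
    };

    /* container_of()-style downcast: recover the container from a
     * pointer to its embedded base, whatever the member's offset */
    #define to_gk104(p) \
        ((struct gk104_like_fifo *)((char *)(p) - \
         offsetof(struct gk104_like_fifo, base)))

    static int demo_new(struct base_fifo **pfifo)
    {
        struct gk104_like_fifo *fifo = calloc(1, sizeof(*fifo));
        if (!fifo)
            return -1;           /* -ENOMEM in the driver */
        *pfifo = &fifo->base;    /* callers only see the base type */
        return 0;
    }

    int main(void)
    {
        struct base_fifo *base;
        if (demo_new(&base) == 0) {
            struct gk104_like_fifo *fifo = to_gk104(base);
            fifo->pbdma_nr = 3;
            printf("pbdma_nr=%d\n", fifo->pbdma_nr);
            free(fifo);
        }
        return 0;
    }
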