/* Annotated excerpts from the nouveau GK104 PFIFO implementation
 * (nvkm/engine/fifo/gk104.c); elided lines are marked "...". */

#include <subdev/mc.h>

/* gk104_chan_stop() -- halt the channel via bit 11 of its control word. */
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);

/* gk104_chan_start() -- enable the channel via bit 10 of its control word. */
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);

/* gk104_chan_unbind() -- clear the channel's instance pointer. */
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x00000000);

/* gk104_chan_bind_inst() -- point the channel at its instance block
 * (valid bit 31, address in units of 4KiB pages). */
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x80000000 | chan->inst->addr >> 12);

/* gk104_chan_bind() -- assign the channel to its runlist. */
struct nvkm_runl *runl = chan->cgrp->runl;
struct nvkm_device *device = runl->fifo->engine.subdev.device;

nvkm_mask(device, 0x800004 + (chan->id * 8), 0x000f0000, runl->id << 16);
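
/*
 * Hedged summary of the per-channel register pair implied by the four
 * helpers above (macro names are illustrative, not from the driver):
 *
 *	#define GK104_CHAN_INST(id) (0x800000 + (id) * 8)	// bit 31 = valid, low bits = inst addr >> 12
 *	#define GK104_CHAN_CTRL(id) (0x800004 + (id) * 8)	// bit 10 = start, bit 11 = stop, bits 19:16 = runlist
 */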

/* gk104_chan_ramfc_write() -- fill in the channel's RAMFC in its instance
 * block: USERD address, GPFIFO base/limit, and PBDMA configuration. */
const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
/* ... */

nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x08, lower_32_bits(userd));
nvkm_wo32(chan->inst, 0x0c, upper_32_bits(userd));
nvkm_wo32(chan->inst, 0x10, 0x0000face);
nvkm_wo32(chan->inst, 0x30, 0xfffff902);
nvkm_wo32(chan->inst, 0x48, lower_32_bits(offset));
nvkm_wo32(chan->inst, 0x4c, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->inst, 0x84, 0x20400000);
nvkm_wo32(chan->inst, 0x94, 0x30000000 | devm);
nvkm_wo32(chan->inst, 0x9c, 0x00000100);
nvkm_wo32(chan->inst, 0xac, 0x0000001f);
nvkm_wo32(chan->inst, 0xe4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->inst, 0xe8, chan->id);
nvkm_wo32(chan->inst, 0xb8, 0xf8000000);
nvkm_wo32(chan->inst, 0xf8, 0x10003080); /* 0x002310 */
nvkm_wo32(chan->inst, 0xfc, 0x10000010); /* 0x002350 */
nvkm_done(chan->inst);
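
/*
 * Hedged sketch of the elided context above: "offset"/"limit2" describe the
 * GPFIFO ring (base at 0x48/0x4c, log2 entry count in bits 31:16 of 0x4c).
 * The caller presumably derives limit2 from the ring length, each GPFIFO
 * entry being 8 bytes:
 *
 *	const u32 limit2 = ilog2(length / 8);	// assumes power-of-two length
 */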

/* gk104_ectx_bind() -- publish an engine context address in the channel's
 * instance block; the slot offsets (ptr0, and optionally ptr1) are chosen
 * per engine type in the switch below. */
switch (engn->engine->subdev.type) {
/* ... per-engine cases; one case keys off the subdev instance: */
	if (!engn->engine->subdev.inst)
/* ... */
}

/* ... */
addr = cctx->vctx->vma->addr;
/* ... */

nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, ptr0 + 0, lower_32_bits(addr));
nvkm_wo32(chan->inst, ptr0 + 4, upper_32_bits(addr));
/* ... the second slot is presumably written only when ptr1 is set ... */
nvkm_wo32(chan->inst, ptr1 + 0, lower_32_bits(addr));
nvkm_wo32(chan->inst, ptr1 + 4, upper_32_bits(addr));
/* ... */
nvkm_done(chan->inst);

/* gk104_ectx_ctor() -- reserve a 4KiB-aligned (page shift 12) VMA sized to
 * the context image, then map the instance memory into it. */
ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
/* ... */
return nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, &args, sizeof(args));
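
/*
 * Assumption: "args" is a GPU-specific map-flags struct (on GF100-family
 * VMMs, something like a struct gf100_vmm_map_v0 requesting a privileged
 * mapping); it is declared in the elided lines above.
 */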

/* gk104_engn_status() -- decode the engine status word and work out which
 * context (previous or next) is the relevant one; the elided else/brace
 * lines are restored below for readability. */
u32 stat = nvkm_rd32(engn->runl->fifo->engine.subdev.device, 0x002640 + (engn->id * 0x08));

status->busy = !!(stat & 0x80000000);
status->faulted = !!(stat & 0x40000000);
status->next.tsg = !!(stat & 0x10000000);
status->next.id = (stat & 0x0fff0000) >> 16;
status->chsw = !!(stat & 0x00008000);
status->save = !!(stat & 0x00004000);
status->load = !!(stat & 0x00002000);
status->prev.tsg = !!(stat & 0x00001000);
status->prev.id = (stat & 0x00000fff);
status->chan = NULL;

if (status->busy && status->chsw) {
	if (status->load && status->save) {
		if (nvkm_engine_chsw_load(engn->engine))
			status->chan = &status->next;
		else
			status->chan = &status->prev;
	} else if (status->load) {
		status->chan = &status->next;
	} else {
		status->chan = &status->prev;
	}
} else if (status->load) {
	status->chan = &status->prev;
}

ENGN_DEBUG(engn, "%08x: busy %d faulted %d chsw %d save %d load %d %sid %d%s-> %sid %d%s",
	   stat, status->busy, status->faulted, status->chsw, status->save, status->load,
	   status->prev.tsg ? "tsg" : "ch", status->prev.id,
	   status->chan == &status->prev ? "*" : " ",
	   status->next.tsg ? "tsg" : "ch", status->next.id,
	   status->chan == &status->next ? "*" : " ");
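
/*
 * Layout of the engine status word at 0x002640 + engn->id * 8, as decoded
 * above:
 *
 *	bit  31     busy		bit  30     faulted
 *	bit  28     next is a TSG	bits 27:16  next id
 *	bit  15     channel switch	bit  14     save
 *	bit  13     load		bit  12     prev is a TSG
 *	bits 11:0   prev id
 */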

/* gk104_engn_cxid() -- id of the context active on the engine (whether it
 * is a TSG is returned through *cgid), or -ENODEV when none is resident. */
/* ... */
*cgid = status.chan->tsg;
return status.chan->id;
/* ... */

return -ENODEV;

/* gk104_runq_idle() -- the PBDMA is idle when status bits 13-15 are clear. */
struct nvkm_device *device = runq->fifo->engine.subdev.device;

return !(nvkm_rd32(device, 0x003080 + (runq->id * 4)) & 0x0000e000);

/* gk104_runq_intr_1() -- handle the PBDMA's secondary (HCE) interrupt
 * status; the triggering channel id is read alongside mask and status. */
struct nvkm_subdev *subdev = &runq->fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 mask = nvkm_rd32(device, 0x04014c + (runq->id * 0x2000));
u32 stat = nvkm_rd32(device, 0x040148 + (runq->id * 0x2000)) & mask;
u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & 0xfff;

/* ... give an optional per-chipset hook first crack at the fault ... */
if (runq->func->intr_1_ctxnotvalid &&
    runq->func->intr_1_ctxnotvalid(runq, chid))
/* ... */

/* ... tail of the error report for any remaining status bits ... */
	   runq->id, stat, msg, chid,
	   nvkm_rd32(device, 0x040150 + (runq->id * 0x2000)),
	   nvkm_rd32(device, 0x040154 + (runq->id * 0x2000)));
/* ... */

/* Acknowledge the handled bits. */
nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), stat);
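
/*
 * Note: 0x040148 is evidently write-one-to-clear -- gk104_runq_init()
 * below writes 0xffffffff to clear everything pending, and the handler
 * above writes the handled "stat" bits back to acknowledge them.
 */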

/* gk104_runq_init() -- clear all pending HCE interrupts, then unmask them. */
struct nvkm_device *device = runq->fifo->engine.subdev.device;
/* ... */

nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0xffffffff); /* HCE.INTR */
nvkm_wr32(device, 0x04014c + (runq->id * 0x2000), 0xffffffff); /* HCE.INTREN */

/* gk104_runq_runm() -- bitmask of the runlists this PBDMA serves. */
return nvkm_rd32(runq->fifo->engine.subdev.device, 0x002390 + (runq->id * 0x04));

/* gk104_runl_fault_clear() */
nvkm_wr32(runl->fifo->engine.subdev.device, 0x00262c, BIT(runl->id));

/* gk104_runl_allow() -- unblock the runlist by clearing its bit in 0x002630. */
nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000);

/* gk104_runl_block() -- block the runlist by setting its bit in 0x002630. */
nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id));

/* gk104_runl_pending() -- a commit is still pending while bit 20 is set. */
struct nvkm_device *device = runl->fifo->engine.subdev.device;

return nvkm_rd32(device, 0x002284 + (runl->id * 0x08)) & 0x00100000;

/* gk104_runl_commit() -- submit a new runlist; the submit register takes
 * the runlist id and entry count, with the base address programmed in the
 * elided lines. */
struct nvkm_fifo *fifo = runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
/* ... compute the runlist base address and memory target ... */

spin_lock_irq(&fifo->lock);
/* ... */
nvkm_wr32(device, 0x002274, (runl->id << 20) | count);
spin_unlock_irq(&fifo->lock);
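
/*
 * Hedged sketch of the elided half of the commit: the runlist base goes to
 * a companion register before the submit above; on Kepler-era hardware this
 * is presumably 0x002270, encoding an aperture target and a 4KiB-page
 * address:
 *
 *	nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12));
 *	nvkm_wr32(device, 0x002274, (runl->id << 20) | count);
 */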

/* gk104_runl_insert_chan() -- a runlist entry is just the channel id. */
nvkm_wo32(memory, offset + 0, chan->id);

/* gk104_fifo_intr_bind() -- report a BIND_ERROR, decoding the engine from
 * the interrupt status word. */
struct nvkm_subdev *subdev = &fifo->engine.subdev;
u32 intr = nvkm_rd32(subdev->device, 0x00252c);
/* ... */
nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");

/* gk104_fifo_intr_chsw() */
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;

/* gk104_fifo_intr_dropped_fault() */
struct nvkm_subdev *subdev = &fifo->engine.subdev;
u32 stat = nvkm_rd32(subdev->device, 0x00259c);

/* gk104_fifo_intr_runlist() -- acknowledge each pending runlist by writing
 * its bit back to 0x002a00. */
struct nvkm_device *device = fifo->engine.subdev.device;
/* ... */

nvkm_runl_foreach_cond(runl, fifo, mask & BIT(runl->id)) {
	nvkm_wr32(device, 0x002a00, BIT(runl->id));
}

/* gk104_fifo_intr() -- top-level PFIFO interrupt dispatch; individual
 * sources (bind, chsw, dropped fault, PBDMA, runlist, ...) are handled by
 * the helpers above. */
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
/* ... */

/* Non-stalling interrupt: notify waiters rather than report an error. */
nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
/* ... */
spin_lock(&fifo->lock);
/* ... */
spin_unlock(&fifo->lock);

/* gk104_fifo_init_pbdmas() */
struct nvkm_device *device = fifo->engine.subdev.device;

/* gk104_fifo_init() -- if USERD lives in BAR1, program its base. */
struct nvkm_device *device = fifo->engine.subdev.device;

if (fifo->func->chan.func->userd->bar == 1)
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);
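
/*
 * Assumption: 0x002254 takes the BAR1 base of the USERD region in 4KiB
 * units, with bit 28 acting as a valid/enable flag -- hence the write is
 * skipped entirely when USERD is not placed in BAR1.
 */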

/* gk104_fifo_runl_ctor() -- build the runlist topology from the device's
 * top-level (PTOP) device list: one runlist per tdev->runlist id, PBDMAs
 * attached by matching runlist masks, then each engine added in turn. */
struct nvkm_device *device = fifo->engine.subdev.device;
/* ... */

nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) {
	runl = nvkm_runl_get(fifo, tdev->runlist, tdev->runlist);
	/* ... create the runlist on first sight of its id ... */
	runl = nvkm_runl_new(fifo, tdev->runlist, tdev->runlist, 0);
	/* ... */

	/* Attach every PBDMA whose runlist mask includes this runlist. */
	nvkm_runq_foreach_cond(runq, fifo, gk104_runq_runm(runq) & BIT(runl->id)) {
		if (WARN_ON(runl->runq_nr == ARRAY_SIZE(runl->runq)))
			return -ENOMEM;

		runl->runq[runl->runq_nr++] = runq;
	}
	/* ... */

	if (tdev->engine < 0)
		continue;

	/* Pick the per-engine ops: copy engines get their own table. */
	switch (tdev->type) {
	/* ... */
		func = fifo->func->engn_ce;
	/* ... */
		func = fifo->func->engn;
	/* ... */
	}

	nvkm_runl_add(runl, tdev->engine, func, tdev->type, tdev->inst);
}
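
/*
 * Worked example (hypothetical values): if gk104_runq_runm() reads 0x3 for
 * PBDMA0, that PBDMA serves runlists 0 and 1, so the attach loop above adds
 * it to both runl0->runq[] and runl1->runq[] as those runlists are created.
 */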