1 // SPDX-License-Identifier: MIT
24 * userspace-manageable portion of the VA space. It provides operations to map
50 * - unmap non-existent sparse mappings
51 * - unmap a sparse mapping and map a new sparse mapping overlapping the range
53 * - unmap a sparse mapping and map new memory backed mappings overlapping the
82 * be pending. Hence, EXEC jobs are required to have the particular fences - of
83 * the corresponding VM_BIND jobs they depend on - attached to them.
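The following is a minimal userspace sketch of that rule: an asynchronous VM_BIND job maps a buffer and signals a syncobj, and the subsequent EXEC job passes the same syncobj as a wait fence, so the push buffer only runs once the mapping is in place. The struct and ioctl names used for the bind side (drm_nouveau_vm_bind, drm_nouveau_vm_bind_op, drm_nouveau_sync, DRM_IOCTL_NOUVEAU_VM_BIND) are assumed to match include/uapi/drm/nouveau_drm.h rather than taken from this file; the handles, addresses and channel id are placeholders, and the include paths depend on where the uapi header is installed.

/*
 * Sketch only: bind memory asynchronously, then have the EXEC job wait for
 * the VM_BIND job's fence via a syncobj. UAPI names assumed from
 * include/uapi/drm/nouveau_drm.h; error handling trimmed to the minimum.
 */
#include <stdint.h>
#include <xf86drm.h>            /* drmIoctl() from libdrm */
#include <drm/nouveau_drm.h>    /* header location may differ per distro */

static int bind_then_exec(int fd, uint32_t syncobj, uint32_t channel,
                          uint32_t bo_handle, uint64_t va, uint64_t size,
                          uint64_t push_va, uint32_t push_len)
{
        /* One map operation, executed asynchronously by the VM_BIND job. */
        struct drm_nouveau_vm_bind_op op = {
                .op = DRM_NOUVEAU_VM_BIND_OP_MAP,
                .handle = bo_handle,
                .addr = va,
                .range = size,
        };
        /* Syncobj (created earlier, e.g. with drmSyncobjCreate()) that the
         * kernel signals once the VM_BIND job has finished. */
        struct drm_nouveau_sync sig = {
                .flags = DRM_NOUVEAU_SYNC_SYNCOBJ,
                .handle = syncobj,
        };
        struct drm_nouveau_vm_bind bind = {
                .flags = DRM_NOUVEAU_VM_BIND_RUN_ASYNC,
                .op_count = 1,
                .op_ptr = (uint64_t)(uintptr_t)&op,
                .sig_count = 1,
                .sig_ptr = (uint64_t)(uintptr_t)&sig,
        };
        /* The EXEC job waits on the same syncobj, i.e. it has the pending
         * VM_BIND job's fence attached before the push buffer runs. */
        struct drm_nouveau_sync wait = sig;
        struct drm_nouveau_exec_push push = {
                .va = push_va,
                .va_len = push_len,
        };
        struct drm_nouveau_exec exec = {
                .channel = channel,
                .wait_count = 1,
                .wait_ptr = (uint64_t)(uintptr_t)&wait,
                .push_count = 1,
                .push_ptr = (uint64_t)(uintptr_t)&push,
        };
        int ret;

        ret = drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
        if (ret)
                return ret;
        return drmIoctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);
}

The exec-side fields (channel, wait_count/wait_ptr, push_count/push_ptr, and drm_nouveau_exec_push's va/va_len) correspond to the request fields copied in nouveau_exec_ucopy() and validated in nouveau_exec_ioctl_exec() below.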
91 struct nouveau_cli *cli = job->cli; in nouveau_exec_job_submit()
96 ret = nouveau_fence_create(&exec_job->fence, exec_job->chan); in nouveau_exec_job_submit()
121 drm_gpuvm_exec_resv_add_fence(vme, job->done_fence, in nouveau_exec_job_armed_submit()
122 job->resv_usage, job->resv_usage); in nouveau_exec_job_armed_submit()
130 struct nouveau_channel *chan = exec_job->chan; in nouveau_exec_job_run()
131 struct nouveau_fence *fence = exec_job->fence; in nouveau_exec_job_run()
134 ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16); in nouveau_exec_job_run()
136 NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret); in nouveau_exec_job_run()
140 for (i = 0; i < exec_job->push.count; i++) { in nouveau_exec_job_run()
141 struct drm_nouveau_exec_push *p = &exec_job->push.s[i]; in nouveau_exec_job_run()
142 bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH; in nouveau_exec_job_run()
144 nv50_dma_push(chan, p->va, p->va_len, no_prefetch); in nouveau_exec_job_run()
149 nouveau_fence_unref(&exec_job->fence); in nouveau_exec_job_run()
150 NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret); in nouveau_exec_job_run()
158 exec_job->fence = NULL; in nouveau_exec_job_run()
160 return &fence->base; in nouveau_exec_job_run()
171 kfree(exec_job->fence); in nouveau_exec_job_free()
172 kfree(exec_job->push.s); in nouveau_exec_job_free()
180 struct nouveau_channel *chan = exec_job->chan; in nouveau_exec_job_timeout()
182 if (unlikely(!atomic_read(&chan->killed))) in nouveau_exec_job_timeout()
185 NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n", in nouveau_exec_job_timeout()
186 chan->chid); in nouveau_exec_job_timeout()
207 for (i = 0; i < __args->push.count; i++) { in nouveau_exec_job_init()
208 struct drm_nouveau_exec_push *p = &__args->push.s[i]; in nouveau_exec_job_init()
210 if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) { in nouveau_exec_job_init()
211 NV_PRINTK(err, nouveau_cli(__args->file_priv), in nouveau_exec_job_init()
213 p->va_len, NV50_DMA_PUSH_MAX_LENGTH); in nouveau_exec_job_init()
214 return -EINVAL; in nouveau_exec_job_init()
220 return -ENOMEM; in nouveau_exec_job_init()
222 job->push.count = __args->push.count; in nouveau_exec_job_init()
223 if (__args->push.count) { in nouveau_exec_job_init()
224 job->push.s = kmemdup(__args->push.s, in nouveau_exec_job_init()
225 sizeof(*__args->push.s) * in nouveau_exec_job_init()
226 __args->push.count, in nouveau_exec_job_init()
228 if (!job->push.s) { in nouveau_exec_job_init()
229 ret = -ENOMEM; in nouveau_exec_job_init()
234 args.file_priv = __args->file_priv; in nouveau_exec_job_init()
235 job->chan = __args->chan; in nouveau_exec_job_init()
237 args.sched = __args->sched; in nouveau_exec_job_init()
239 args.credits = job->push.count + 1; in nouveau_exec_job_init()
241 args.in_sync.count = __args->in_sync.count; in nouveau_exec_job_init()
242 args.in_sync.s = __args->in_sync.s; in nouveau_exec_job_init()
244 args.out_sync.count = __args->out_sync.count; in nouveau_exec_job_init()
245 args.out_sync.s = __args->out_sync.s; in nouveau_exec_job_init()
250 ret = nouveau_job_init(&job->base, &args); in nouveau_exec_job_init()
257 kfree(job->push.s); in nouveau_exec_job_init()
275 ret = nouveau_job_submit(&job->base); in nouveau_exec()
282 nouveau_job_fini(&job->base); in nouveau_exec()
291 u32 inc = req->wait_count; in nouveau_exec_ucopy()
292 u64 ins = req->wait_ptr; in nouveau_exec_ucopy()
293 u32 outc = req->sig_count; in nouveau_exec_ucopy()
294 u64 outs = req->sig_ptr; in nouveau_exec_ucopy()
295 u32 pushc = req->push_count; in nouveau_exec_ucopy()
296 u64 pushs = req->push_ptr; in nouveau_exec_ucopy()
300 args->push.count = pushc; in nouveau_exec_ucopy()
301 args->push.s = u_memcpya(pushs, pushc, sizeof(*args->push.s)); in nouveau_exec_ucopy()
302 if (IS_ERR(args->push.s)) in nouveau_exec_ucopy()
303 return PTR_ERR(args->push.s); in nouveau_exec_ucopy()
307 s = &args->in_sync.s; in nouveau_exec_ucopy()
309 args->in_sync.count = inc; in nouveau_exec_ucopy()
318 s = &args->out_sync.s; in nouveau_exec_ucopy()
320 args->out_sync.count = outc; in nouveau_exec_ucopy()
331 u_free(args->push.s); in nouveau_exec_ucopy()
333 u_free(args->in_sync.s); in nouveau_exec_ucopy()
340 u_free(args->push.s); in nouveau_exec_ufree()
341 u_free(args->in_sync.s); in nouveau_exec_ufree()
342 u_free(args->out_sync.s); in nouveau_exec_ufree()
359 return -ENOMEM; in nouveau_exec_ioctl_exec()
363 return nouveau_abi16_put(abi16, -ENOSYS); in nouveau_exec_ioctl_exec()
365 list_for_each_entry(chan16, &abi16->channels, head) { in nouveau_exec_ioctl_exec()
366 if (chan16->chan->chid == req->channel) { in nouveau_exec_ioctl_exec()
367 chan = chan16->chan; in nouveau_exec_ioctl_exec()
373 return nouveau_abi16_put(abi16, -ENOENT); in nouveau_exec_ioctl_exec()
375 if (unlikely(atomic_read(&chan->killed))) in nouveau_exec_ioctl_exec()
376 return nouveau_abi16_put(abi16, -ENODEV); in nouveau_exec_ioctl_exec()
378 if (!chan->dma.ib_max) in nouveau_exec_ioctl_exec()
379 return nouveau_abi16_put(abi16, -ENOSYS); in nouveau_exec_ioctl_exec()
381 push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max); in nouveau_exec_ioctl_exec()
382 if (unlikely(req->push_count > push_max)) { in nouveau_exec_ioctl_exec()
384 req->push_count, push_max); in nouveau_exec_ioctl_exec()
385 return nouveau_abi16_put(abi16, -EINVAL); in nouveau_exec_ioctl_exec()
392 args.sched = chan16->sched; in nouveau_exec_ioctl_exec()