1 /*
2  * Copyright 2005 Stephane Marchesin.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  */
24 
25 #ifndef __NOUVEAU_DRM_H__
26 #define __NOUVEAU_DRM_H__
27 
28 #define DRM_NOUVEAU_EVENT_NVIF                                       0x80000000
29 
30 #include "drm.h"
31 
32 #if defined(__cplusplus)
33 extern "C" {
34 #endif
35 
36 #define NOUVEAU_GETPARAM_PCI_VENDOR      3
37 #define NOUVEAU_GETPARAM_PCI_DEVICE      4
38 #define NOUVEAU_GETPARAM_BUS_TYPE        5
39 #define NOUVEAU_GETPARAM_FB_SIZE         8
40 #define NOUVEAU_GETPARAM_AGP_SIZE        9
41 #define NOUVEAU_GETPARAM_CHIPSET_ID      11
42 #define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
43 #define NOUVEAU_GETPARAM_GRAPH_UNITS     13
44 #define NOUVEAU_GETPARAM_PTIMER_TIME     14
45 #define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
46 #define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
47 
48 /*
49  * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
50  *
 * Query the maximum number of IBs that can be pushed through a single
52  * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
53  * ioctl().
54  */
55 #define NOUVEAU_GETPARAM_EXEC_PUSH_MAX   17
56 
57 /*
58  * NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
59  *
60  * Query the VRAM BAR size.
61  */
62 #define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
63 
64 /*
65  * NOUVEAU_GETPARAM_VRAM_USED
66  *
 * Get the amount of VRAM currently in use.
68  */
69 #define NOUVEAU_GETPARAM_VRAM_USED 19
70 
71 /*
72  * NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
73  *
74  * Query whether tile mode and PTE kind are accepted with VM allocs or not.
75  */
76 #define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
77 
78 struct drm_nouveau_getparam {
79 	__u64 param;
80 	__u64 value;
81 };
82 
83 /*
84  * Those are used to support selecting the main engine used on Kepler.
85  * This goes into drm_nouveau_channel_alloc::tt_ctxdma_handle
86  */
87 #define NOUVEAU_FIFO_ENGINE_GR  0x01
88 #define NOUVEAU_FIFO_ENGINE_VP  0x02
89 #define NOUVEAU_FIFO_ENGINE_PPP 0x04
90 #define NOUVEAU_FIFO_ENGINE_BSP 0x08
91 #define NOUVEAU_FIFO_ENGINE_CE  0x30
92 
93 struct drm_nouveau_channel_alloc {
94 	__u32     fb_ctxdma_handle;
95 	__u32     tt_ctxdma_handle;
96 
97 	__s32     channel;
98 	__u32     pushbuf_domains;
99 
100 	/* Notifier memory */
101 	__u32     notifier_handle;
102 
103 	/* DRM-enforced subchannel assignments */
104 	struct {
105 		__u32 handle;
106 		__u32 grclass;
107 	} subchan[8];
108 	__u32 nr_subchan;
109 };
110 
111 struct drm_nouveau_channel_free {
112 	__s32 channel;
113 };
114 
115 struct drm_nouveau_notifierobj_alloc {
116 	__u32 channel;
117 	__u32 handle;
118 	__u32 size;
119 	__u32 offset;
120 };
121 
122 struct drm_nouveau_gpuobj_free {
123 	__s32 channel;
124 	__u32 handle;
125 };
126 
127 #define NOUVEAU_GEM_DOMAIN_CPU       (1 << 0)
128 #define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
129 #define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
130 #define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
131 #define NOUVEAU_GEM_DOMAIN_COHERENT  (1 << 4)
132 /* The BO will never be shared via import or export. */
133 #define NOUVEAU_GEM_DOMAIN_NO_SHARE  (1 << 5)
134 
135 #define NOUVEAU_GEM_TILE_COMP        0x00030000 /* nv50-only */
136 #define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
137 #define NOUVEAU_GEM_TILE_16BPP       0x00000001
138 #define NOUVEAU_GEM_TILE_32BPP       0x00000002
139 #define NOUVEAU_GEM_TILE_ZETA        0x00000004
140 #define NOUVEAU_GEM_TILE_NONCONTIG   0x00000008
141 
142 struct drm_nouveau_gem_info {
143 	__u32 handle;
144 	__u32 domain;
145 	__u64 size;
146 	__u64 offset;
147 	__u64 map_handle;
148 	__u32 tile_mode;
149 	__u32 tile_flags;
150 };
151 
152 struct drm_nouveau_gem_new {
153 	struct drm_nouveau_gem_info info;
154 	__u32 channel_hint;
155 	__u32 align;
156 };
157 
158 #define NOUVEAU_GEM_MAX_BUFFERS 1024
159 struct drm_nouveau_gem_pushbuf_bo_presumed {
160 	__u32 valid;
161 	__u32 domain;
162 	__u64 offset;
163 };
164 
165 struct drm_nouveau_gem_pushbuf_bo {
166 	__u64 user_priv;
167 	__u32 handle;
168 	__u32 read_domains;
169 	__u32 write_domains;
170 	__u32 valid_domains;
171 	struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
172 };
173 
174 #define NOUVEAU_GEM_RELOC_LOW  (1 << 0)
175 #define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
176 #define NOUVEAU_GEM_RELOC_OR   (1 << 2)
177 #define NOUVEAU_GEM_MAX_RELOCS 1024
178 struct drm_nouveau_gem_pushbuf_reloc {
179 	__u32 reloc_bo_index;
180 	__u32 reloc_bo_offset;
181 	__u32 bo_index;
182 	__u32 flags;
183 	__u32 data;
184 	__u32 vor;
185 	__u32 tor;
186 };
187 
188 #define NOUVEAU_GEM_MAX_PUSH 512
189 struct drm_nouveau_gem_pushbuf_push {
190 	__u32 bo_index;
191 	__u32 pad;
192 	__u64 offset;
193 	__u64 length;
194 #define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
195 };
196 
197 struct drm_nouveau_gem_pushbuf {
198 	__u32 channel;
199 	__u32 nr_buffers;
200 	__u64 buffers;
201 	__u32 nr_relocs;
202 	__u32 nr_push;
203 	__u64 relocs;
204 	__u64 push;
205 	__u32 suffix0;
206 	__u32 suffix1;
207 #define NOUVEAU_GEM_PUSHBUF_SYNC                                    (1ULL << 0)
208 	__u64 vram_available;
209 	__u64 gart_available;
210 };
211 
212 #define NOUVEAU_GEM_CPU_PREP_NOWAIT                                  0x00000001
213 #define NOUVEAU_GEM_CPU_PREP_WRITE                                   0x00000004
214 struct drm_nouveau_gem_cpu_prep {
215 	__u32 handle;
216 	__u32 flags;
217 };
218 
219 struct drm_nouveau_gem_cpu_fini {
220 	__u32 handle;
221 };
222 
223 /**
224  * struct drm_nouveau_sync - sync object
225  *
 * This structure serves as a synchronization mechanism for (potentially)
227  * asynchronous operations such as EXEC or VM_BIND.
228  */
229 struct drm_nouveau_sync {
230 	/**
231 	 * @flags: the flags for a sync object
232 	 *
233 	 * The first 8 bits are used to determine the type of the sync object.
234 	 */
235 	__u32 flags;
236 #define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
237 #define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
238 #define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
239 	/**
240 	 * @handle: the handle of the sync object
241 	 */
242 	__u32 handle;
243 	/**
244 	 * @timeline_value:
245 	 *
246 	 * The timeline point of the sync object in case the syncobj is of
247 	 * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
248 	 */
249 	__u64 timeline_value;
250 };
251 
252 /**
253  * struct drm_nouveau_vm_init - GPU VA space init structure
254  *
255  * Used to initialize the GPU's VA space for a user client, telling the kernel
256  * which portion of the VA space is managed by the UMD and kernel respectively.
257  *
 * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
 * channels are created; if called afterwards, DRM_IOCTL_NOUVEAU_VM_INIT fails
 * with -ENOSYS.
261  */
262 struct drm_nouveau_vm_init {
263 	/**
264 	 * @kernel_managed_addr: start address of the kernel managed VA space
265 	 * region
266 	 */
267 	__u64 kernel_managed_addr;
268 	/**
269 	 * @kernel_managed_size: size of the kernel managed VA space region in
270 	 * bytes
271 	 */
272 	__u64 kernel_managed_size;
273 };
274 
275 /**
276  * struct drm_nouveau_vm_bind_op - VM_BIND operation
277  *
278  * This structure represents a single VM_BIND operation. UMDs should pass
279  * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
280  */
281 struct drm_nouveau_vm_bind_op {
282 	/**
283 	 * @op: the operation type
284 	 *
285 	 * Supported values:
286 	 *
287 	 * %DRM_NOUVEAU_VM_BIND_OP_MAP - Map a GEM object to the GPU's VA
288 	 * space. Optionally, the &DRM_NOUVEAU_VM_BIND_SPARSE flag can be
289 	 * passed to instruct the kernel to create sparse mappings for the
290 	 * given range.
291 	 *
	 * %DRM_NOUVEAU_VM_BIND_OP_UNMAP - Unmap an existing mapping in the
	 * GPU's VA space. If the region the mapping is located in is a
	 * sparse region, new sparse mappings are created where the unmapped
	 * (memory backed) mapping was mapped previously. To remove a sparse
	 * region, the &DRM_NOUVEAU_VM_BIND_SPARSE flag must be set.
297 	 */
298 	__u32 op;
299 #define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
300 #define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
301 	/**
302 	 * @flags: the flags for a &drm_nouveau_vm_bind_op
303 	 *
304 	 * Supported values:
305 	 *
306 	 * %DRM_NOUVEAU_VM_BIND_SPARSE - Indicates that an allocated VA
307 	 * space region should be sparse.
308 	 */
309 	__u32 flags;
310 #define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
311 	/**
312 	 * @handle: the handle of the DRM GEM object to map
313 	 */
314 	__u32 handle;
315 	/**
316 	 * @pad: 32 bit padding, should be 0
317 	 */
318 	__u32 pad;
319 	/**
320 	 * @addr:
321 	 *
322 	 * the address the VA space region or (memory backed) mapping should be mapped to
323 	 */
324 	__u64 addr;
325 	/**
326 	 * @bo_offset: the offset within the BO backing the mapping
327 	 */
328 	__u64 bo_offset;
329 	/**
330 	 * @range: the size of the requested mapping in bytes
331 	 */
332 	__u64 range;
333 };
334 
335 /**
336  * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
337  */
338 struct drm_nouveau_vm_bind {
339 	/**
340 	 * @op_count: the number of &drm_nouveau_vm_bind_op
341 	 */
342 	__u32 op_count;
343 	/**
344 	 * @flags: the flags for a &drm_nouveau_vm_bind ioctl
345 	 *
346 	 * Supported values:
347 	 *
348 	 * %DRM_NOUVEAU_VM_BIND_RUN_ASYNC - Indicates that the given VM_BIND
349 	 * operation should be executed asynchronously by the kernel.
350 	 *
351 	 * If this flag is not supplied the kernel executes the associated
352 	 * operations synchronously and doesn't accept any &drm_nouveau_sync
353 	 * objects.
354 	 */
355 	__u32 flags;
356 #define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
357 	/**
358 	 * @wait_count: the number of wait &drm_nouveau_syncs
359 	 */
360 	__u32 wait_count;
361 	/**
362 	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
363 	 */
364 	__u32 sig_count;
365 	/**
366 	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
367 	 */
368 	__u64 wait_ptr;
369 	/**
370 	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
371 	 */
372 	__u64 sig_ptr;
373 	/**
374 	 * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
375 	 */
376 	__u64 op_ptr;
377 };
378 
379 /**
380  * struct drm_nouveau_exec_push - EXEC push operation
381  *
382  * This structure represents a single EXEC push operation. UMDs should pass an
383  * array of this structure via struct drm_nouveau_exec's &push_ptr field.
384  */
385 struct drm_nouveau_exec_push {
386 	/**
387 	 * @va: the virtual address of the push buffer mapping
388 	 */
389 	__u64 va;
390 	/**
391 	 * @va_len: the length of the push buffer mapping
392 	 */
393 	__u32 va_len;
394 	/**
395 	 * @flags: the flags for this push buffer mapping
396 	 */
397 	__u32 flags;
398 #define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
399 };
400 
401 /**
402  * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
403  */
404 struct drm_nouveau_exec {
405 	/**
406 	 * @channel: the channel to execute the push buffer in
407 	 */
408 	__u32 channel;
409 	/**
410 	 * @push_count: the number of &drm_nouveau_exec_push ops
411 	 */
412 	__u32 push_count;
413 	/**
414 	 * @wait_count: the number of wait &drm_nouveau_syncs
415 	 */
416 	__u32 wait_count;
417 	/**
418 	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
419 	 */
420 	__u32 sig_count;
421 	/**
422 	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
423 	 */
424 	__u64 wait_ptr;
425 	/**
426 	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
427 	 */
428 	__u64 sig_ptr;
429 	/**
430 	 * @push_ptr: pointer to &drm_nouveau_exec_push ops
431 	 */
432 	__u64 push_ptr;
433 };
434 
435 #define DRM_NOUVEAU_GETPARAM           0x00
436 #define DRM_NOUVEAU_SETPARAM           0x01 /* deprecated */
437 #define DRM_NOUVEAU_CHANNEL_ALLOC      0x02
438 #define DRM_NOUVEAU_CHANNEL_FREE       0x03
439 #define DRM_NOUVEAU_GROBJ_ALLOC        0x04 /* deprecated */
440 #define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x05 /* deprecated */
441 #define DRM_NOUVEAU_GPUOBJ_FREE        0x06 /* deprecated */
442 #define DRM_NOUVEAU_NVIF               0x07
443 #define DRM_NOUVEAU_SVM_INIT           0x08
444 #define DRM_NOUVEAU_SVM_BIND           0x09
445 #define DRM_NOUVEAU_VM_INIT            0x10
446 #define DRM_NOUVEAU_VM_BIND            0x11
447 #define DRM_NOUVEAU_EXEC               0x12
448 #define DRM_NOUVEAU_GEM_NEW            0x40
449 #define DRM_NOUVEAU_GEM_PUSHBUF        0x41
450 #define DRM_NOUVEAU_GEM_CPU_PREP       0x42
451 #define DRM_NOUVEAU_GEM_CPU_FINI       0x43
452 #define DRM_NOUVEAU_GEM_INFO           0x44
453 
454 struct drm_nouveau_svm_init {
455 	__u64 unmanaged_addr;
456 	__u64 unmanaged_size;
457 };
458 
459 struct drm_nouveau_svm_bind {
460 	__u64 header;
461 	__u64 va_start;
462 	__u64 va_end;
463 	__u64 npages;
464 	__u64 stride;
465 	__u64 result;
466 	__u64 reserved0;
467 	__u64 reserved1;
468 };
469 
470 #define NOUVEAU_SVM_BIND_COMMAND_SHIFT          0
471 #define NOUVEAU_SVM_BIND_COMMAND_BITS           8
472 #define NOUVEAU_SVM_BIND_COMMAND_MASK           ((1 << 8) - 1)
473 #define NOUVEAU_SVM_BIND_PRIORITY_SHIFT         8
474 #define NOUVEAU_SVM_BIND_PRIORITY_BITS          8
475 #define NOUVEAU_SVM_BIND_PRIORITY_MASK          ((1 << 8) - 1)
476 #define NOUVEAU_SVM_BIND_TARGET_SHIFT           16
477 #define NOUVEAU_SVM_BIND_TARGET_BITS            32
478 #define NOUVEAU_SVM_BIND_TARGET_MASK            0xffffffff
479 
480 /*
481  * Below is use to validate ioctl argument, userspace can also use it to make
482  * sure that no bit are set beyond known fields for a given kernel version.
483  */
484 #define NOUVEAU_SVM_BIND_VALID_BITS     48
485 #define NOUVEAU_SVM_BIND_VALID_MASK     ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
486 
487 
488 /*
489  * NOUVEAU_BIND_COMMAND__MIGRATE: synchronous migrate to target memory.
490  * result: number of page successfuly migrate to the target memory.
491  */
492 #define NOUVEAU_SVM_BIND_COMMAND__MIGRATE               0
493 
494 /*
495  * NOUVEAU_SVM_BIND_HEADER_TARGET__GPU_VRAM: target the GPU VRAM memory.
496  */
497 #define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM               (1UL << 31)
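
/*
 * Example (not part of the uAPI): a sketch of composing the header for a
 * synchronous migration of an address range to GPU VRAM. "range_start" and
 * "range_end" are illustrative assumptions; all other fields are left zero.
 *
 *	struct drm_nouveau_svm_bind bind = {
 *		.header   = ((__u64)NOUVEAU_SVM_BIND_COMMAND__MIGRATE
 *			     << NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
 *			    ((__u64)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM
 *			     << NOUVEAU_SVM_BIND_TARGET_SHIFT),
 *		.va_start = range_start,
 *		.va_end   = range_end,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_NOUVEAU_SVM_BIND, &bind);
 */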
498 
499 
500 #define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
501 #define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
502 #define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
503 
504 #define DRM_IOCTL_NOUVEAU_SVM_INIT           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
505 #define DRM_IOCTL_NOUVEAU_SVM_BIND           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
506 
507 #define DRM_IOCTL_NOUVEAU_GEM_NEW            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
508 #define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF        DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
509 #define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
510 #define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
511 #define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
512 
513 #define DRM_IOCTL_NOUVEAU_VM_INIT            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
514 #define DRM_IOCTL_NOUVEAU_VM_BIND            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
515 #define DRM_IOCTL_NOUVEAU_EXEC               DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
516 #if defined(__cplusplus)
517 }
518 #endif
519 
520 #endif /* __NOUVEAU_DRM_H__ */
521