/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/debugfs.h>
#include <linux/iosys-map.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

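/*
 * CPU page fault handler for radeon GEM mappings: takes pm.mclk_lock for
 * reading, reserves the BO via TTM, calls radeon_bo_fault_reserve_notify()
 * and then lets ttm_bo_vm_fault_reserved() populate and map the faulting
 * pages.
 */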
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
	.fault = radeon_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		ttm_bo_put(&robj->tbo);
	}
}

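/*
 * Create a GEM object backed by a radeon BO.  The alignment is rounded up
 * to at least one page, allocations larger than the unpinned GTT size are
 * rejected, and a failed VRAM placement is retried with GTT as an
 * additional domain.  The new BO is tracked on rdev->gem.objects.
 */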
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

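/*
 * Handle a set_domain request on a GEM object.  Requesting the CPU domain
 * simply waits (up to 30s) for the BO to become idle; migrating a
 * prime-shared BO to VRAM is rejected.
 */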
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access, wait for the object to become idle */
		r = dma_resv_wait_timeout(robj->tbo.base.resv,
					  DMA_RESV_USAGE_BOOKKEEP,
					  true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which is used in both the new and the
 * open ioctl case.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

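/*
 * Translate a GPU lockup (-EDEADLK) into a GPU reset followed by -EAGAIN
 * so that userspace can retry the ioctl; all other errors pass through.
 */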
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

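/*
 * mmap callback for radeon GEM objects; userptr BOs may not be mapped
 * through the GEM mmap offset and are rejected with -EPERM.
 */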
static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, vma);
}

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = radeon_gem_object_mmap,
	.vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

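/*
 * Create a GEM object backed by user memory.  The address and size must be
 * page aligned and the flags are validated; depending on the flags an MMU
 * notifier is registered and/or the pages are bound to GTT right away
 * (RADEON_GEM_USERPTR_VALIDATE).
 */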
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

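/*
 * Report whether a BO still has pending work: returns -EBUSY if the BO's
 * reservation object still has unsignaled fences and fills in the BO's
 * current memory domain.
 */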
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start && bo_va->bo)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way
	 * we can use those fields later without breaking existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

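/*
 * Align a scanout pitch to the hardware requirement for the given bytes
 * per pixel; a larger alignment is used for tiled surfaces and AVIVO-class
 * ASICs.  Returns the aligned pitch in bytes.
 */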
int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{
	int aligned = width;
	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = align_large ? 255 : 127;
		break;
	case 2:
		pitch_mask = align_large ? 127 : 31;
		break;
	case 3:
	case 4:
		pitch_mask = align_large ? 63 : 15;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

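/*
 * Dumb buffer allocation: computes an aligned pitch and a page-aligned
 * size, creates a VRAM BO for scanout and returns a handle to userspace.
 */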
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);

#endif
}