// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
#include "i915_trace.h"

void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
					struct intel_memory_region *mem)
{
	obj->mm.region = mem;

	mutex_lock(&mem->objects.lock);
	list_add(&obj->mm.region_link, &mem->objects.list);
	mutex_unlock(&mem->objects.lock);
}

void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);
}
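
/*
 * Note: the two helpers above are expected to be paired by region
 * backends: init from the backend's ->init_object() hook once the object
 * is set up, and release from the object's free path, so that an object
 * sits on exactly one region list at a time.
 */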

static struct drm_i915_gem_object *
__i915_gem_object_create_region(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size,
				resource_size_t page_size,
				unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	resource_size_t default_page_size;
	int err;

	/*
	 * NB: Our use of resource_size_t for the size stems from using struct
	 * resource for the mem->region. We might need to revisit this in the
	 * future.
	 */

	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

	if (WARN_ON_ONCE(flags & I915_BO_ALLOC_GPU_ONLY &&
			 (flags & I915_BO_ALLOC_CPU_CLEAR ||
			  flags & I915_BO_ALLOC_PM_EARLY)))
		return ERR_PTR(-EINVAL);

	if (!mem)
		return ERR_PTR(-ENODEV);

	default_page_size = mem->min_page_size;
	if (page_size)
		default_page_size = page_size;

	/* We should be able to fit a page within an sg entry */
	GEM_BUG_ON(overflows_type(default_page_size, u32));
	GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
	GEM_BUG_ON(default_page_size < PAGE_SIZE);

	size = round_up(size, default_page_size);

	if (default_page_size == size)
		flags |= I915_BO_ALLOC_CONTIGUOUS;
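
	/*
	 * Worked example (illustrative values): with a 64K min_page_size
	 * and no explicit page_size, a 4K request is rounded up so that
	 * round_up(SZ_4K, SZ_64K) == SZ_64K. The rounded size then equals
	 * default_page_size, so the object fits in a single chunk and can
	 * be trivially marked I915_BO_ALLOC_CONTIGUOUS above.
	 */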

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/*
	 * Anything smaller than the min_page_size can't be freely inserted into
	 * the GTT, due to alignment restrictions. For such special objects,
	 * make sure we force memcpy-based suspend-resume. In the future we can
	 * revisit this, either by allowing special mis-aligned objects in the
	 * migration path, or by mapping all of LMEM upfront using cheap 1G
	 * GTT entries.
	 */
	if (default_page_size < mem->min_page_size)
		flags |= I915_BO_ALLOC_PM_EARLY;

	err = mem->ops->init_object(mem, obj, offset, size, page_size, flags);
	if (err)
		goto err_object_free;

	trace_i915_gem_object_create(obj);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
	return ERR_PTR(err);
}

struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
			      resource_size_t size,
			      resource_size_t page_size,
			      unsigned int flags)
{
	return __i915_gem_object_create_region(mem, I915_BO_INVALID_OFFSET,
					       size, page_size, flags);
}
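
/*
 * Usage sketch (illustrative only; @mem and the error handling depend on
 * the caller): allocating a 2M object from a region, letting the region
 * pick both the placement and the page size:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_region(mem, SZ_2M, 0, 0);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */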

struct drm_i915_gem_object *
i915_gem_object_create_region_at(struct intel_memory_region *mem,
				 resource_size_t offset,
				 resource_size_t size,
				 unsigned int flags)
{
	GEM_BUG_ON(offset == I915_BO_INVALID_OFFSET);

	if (GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
	    GEM_WARN_ON(!IS_ALIGNED(offset, mem->min_page_size)))
		return ERR_PTR(-EINVAL);

	if (range_overflows(offset, size, resource_size(&mem->region)))
		return ERR_PTR(-EINVAL);

	if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
	    offset + size > resource_size(&mem->io) &&
	    !i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
		return ERR_PTR(-ENOSPC);

	return __i915_gem_object_create_region(mem, offset, size, 0,
					       flags | I915_BO_ALLOC_CONTIGUOUS);
}
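
/*
 * Usage sketch (illustrative; the offset and size values are hypothetical):
 * placing an object at a fixed region offset, e.g. to claim a reserved
 * range. Note the helper itself forces the allocation to be contiguous:
 *
 *	obj = i915_gem_object_create_region_at(mem, SZ_1M, SZ_64K, 0);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */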

/**
 * i915_gem_process_region - Iterate over all objects of a region using ops
 * to process and optionally skip objects
 * @mr: The memory region
 * @apply: ops and private data
 *
 * This function can be used to iterate over the region's object list,
 * checking whether to skip objects, and, if not, lock the objects and
 * process them using the supplied ops. Note that this function temporarily
 * removes objects from the region list while iterating, so if it is run
 * concurrently with itself it may not iterate over all objects.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int i915_gem_process_region(struct intel_memory_region *mr,
			    struct i915_gem_apply_to_region *apply)
{
	const struct i915_gem_apply_to_region_ops *ops = apply->ops;
	struct drm_i915_gem_object *obj;
	struct list_head still_in_list;
	int ret = 0;

	/*
	 * In the future, a non-NULL apply->ww could mean the caller is
	 * already in a locking transaction and provides its own context.
	 */
	GEM_WARN_ON(apply->ww);

	INIT_LIST_HEAD(&still_in_list);
	mutex_lock(&mr->objects.lock);
	for (;;) {
		struct i915_gem_ww_ctx ww;

		obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj),
					       mm.region_link);
		if (!obj)
			break;

		list_move_tail(&obj->mm.region_link, &still_in_list);
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		/*
		 * Note: Someone else might be migrating the object at this
		 * point. The object's region is not stable until we lock
		 * the object.
		 */
		mutex_unlock(&mr->objects.lock);
		apply->ww = &ww;
		for_i915_gem_ww(&ww, ret, apply->interruptible) {
			ret = i915_gem_object_lock(obj, apply->ww);
			if (ret)
				continue;

			if (obj->mm.region == mr)
				ret = ops->process_obj(apply, obj);
			/* Implicit object unlock */
		}

		i915_gem_object_put(obj);
		mutex_lock(&mr->objects.lock);
		if (ret)
			break;
	}
	list_splice_tail(&still_in_list, &mr->objects.list);
	mutex_unlock(&mr->objects.lock);

	return ret;
}
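
/*
 * Usage sketch (illustrative; my_process_obj and my_ops are hypothetical):
 * a caller supplies a process_obj callback, which runs with the object
 * ww-locked, then walks the region:
 *
 *	static int my_process_obj(struct i915_gem_apply_to_region *apply,
 *				  struct drm_i915_gem_object *obj)
 *	{
 *		... operate on the ww-locked @obj ...
 *		return 0;
 *	}
 *
 *	static const struct i915_gem_apply_to_region_ops my_ops = {
 *		.process_obj = my_process_obj,
 *	};
 *
 *	struct i915_gem_apply_to_region apply = {
 *		.ops = &my_ops,
 *		.interruptible = 1,
 *	};
 *
 *	ret = i915_gem_process_region(mr, &apply);
 */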