/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"
#include "i915_drv.h"

int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);

	if (!atomic_read(&wf->count)) {
		INTEL_WAKEREF_BUG_ON(wf->wakeref);
		wf->wakeref = wakeref;
		wakeref = 0;

		ret = wf->ops->get(wf);
		if (ret) {
			wakeref = xchg(&wf->wakeref, 0);
			wake_up_var(&wf->wakeref);
			goto unlock;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}

	atomic_inc(&wf->count);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);

unlock:
	mutex_unlock(&wf->mutex);
	if (unlikely(wakeref))
		intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);

	return ret;
}
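
/*
 * For context, a paraphrased sketch of the expected caller: the inline
 * fast path in intel_wakeref.h (illustrative, not verbatim) only drops
 * into __intel_wakeref_get_first() when the count was zero, i.e. when
 * the device may be asleep:
 *
 *	static inline int intel_wakeref_get(struct intel_wakeref *wf)
 *	{
 *		might_sleep();
 *		if (unlikely(!atomic_inc_not_zero(&wf->count)))
 *			return __intel_wakeref_get_first(wf);
 *		return 0;
 *	}
 */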

static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = 0;

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_dec_and_test(&wf->count)))
		goto unlock;

	/* ops->put() must reschedule its own release on error/deferral */
	if (likely(!wf->ops->put(wf))) {
		INTEL_WAKEREF_BUG_ON(!wf->wakeref);
		wakeref = xchg(&wf->wakeref, 0);
		wake_up_var(&wf->wakeref);
	}

unlock:
	mutex_unlock(&wf->mutex);
	if (wakeref)
		intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
}
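
/*
 * Note on the ops->put() contract used above: a non-zero return means
 * the callback has deferred the release, so the runtime-pm wakeref is
 * kept and the callback must arrange for a later put. A hypothetical
 * implementation (my_hw_idle() is illustrative, not a real helper):
 *
 *	static int my_put(struct intel_wakeref *wf)
 *	{
 *		if (!my_hw_idle(wf))
 *			return -EBUSY;	(defer: keep the wakeref for now)
 *		return 0;		(idle: release it here)
 *	}
 */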

void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
		mod_delayed_work(wf->i915->unordered_wq, &wf->work,
				 FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
		return;
	}

	____intel_wakeref_put_last(wf);
}
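
/*
 * For reference, a paraphrase of the inline counterpart in
 * intel_wakeref.h (illustrative, not verbatim): the count is dropped
 * locklessly unless ours is the final reference, and only the final
 * put is routed through __intel_wakeref_put_last():
 *
 *	static inline void __intel_wakeref_put(struct intel_wakeref *wf,
 *					       unsigned long flags)
 *	{
 *		INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
 *		if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
 *			__intel_wakeref_put_last(wf, flags);
 *	}
 */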

static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);

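	/*
	 * The work may race with a fresh get: if another reference was
	 * taken while the work was queued, drop ours locklessly and
	 * skip the release path below.
	 */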
	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct drm_i915_private *i915,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key,
			  const char *name)
{
	wf->i915 = i915;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;

	INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
	lockdep_init_map(&wf->work.work.lockdep_map,
			 "wakeref.work", &key->work, 0);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
	ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name);
#endif
}
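
/*
 * A usage sketch (my_get/my_put are hypothetical callbacks; callers
 * normally go through the intel_wakeref_init() wrapper macro in
 * intel_wakeref.h, which supplies the static lock class):
 *
 *	static const struct intel_wakeref_ops my_ops = {
 *		.get = my_get,
 *		.put = my_put,
 *	};
 *
 *	intel_wakeref_init(&obj->wakeref, i915, &my_ops, "my-wakeref");
 */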

int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	int err;

	might_sleep();

	err = wait_var_event_killable(&wf->wakeref,
				      !intel_wakeref_is_active(wf));
	if (err)
		return err;

	intel_wakeref_unlock_wait(wf);
	return 0;
}
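
/*
 * The wait above pairs with the wake_up_var(&wf->wakeref) calls in the
 * get/put paths: waiters are woken whenever wf->wakeref is zeroed,
 * i.e. whenever the wakeref transitions back to idle.
 */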

static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

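	/*
	 * Drop the reference held on behalf of this timer callback;
	 * only the final reference takes the lock and releases the
	 * runtime-pm wakeref below.
	 */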
	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
}

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct drm_i915_private *i915)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->i915 = i915;
}

void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* We only ever extend an already active wakeref here */
	assert_rpm_wakelock_held(&wf->i915->runtime_pm);

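	/*
	 * Double-checked locking: piggyback on an existing reference
	 * locklessly if we can; only the 0 -> 1 transition takes the
	 * lock and latches a runtime-pm wakeref of its own.
	 */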
	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref =
				intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}
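
/*
 * Worked example of the balancing above: if a timer was already
 * pending when intel_wakeref_auto() ran again, refcount_inc_not_zero()
 * raised wf->count to 2, yet mod_timer() merely pushed back the single
 * pending expiry, so only one wakeref_auto_timeout() will ever fire.
 * Running the handler inline drops the surplus reference immediately,
 * keeping exactly one reference per outstanding timer callback.
 */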

void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}

void intel_ref_tracker_show(struct ref_tracker_dir *dir,
			    struct drm_printer *p)
{
	const size_t buf_size = PAGE_SIZE;
	char *buf, *sb, *se;
	size_t count;

	buf = kmalloc(buf_size, GFP_NOWAIT);
	if (!buf)
		return;

	count = ref_tracker_dir_snprint(dir, buf, buf_size);
	if (!count)
		goto free;
	/* printk does not like big buffers, so we split it */
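	/*
	 * strchrnul() yields either the next '\n' or the terminating
	 * NUL, so each drm_printf() emits at most one line (newline
	 * included) and the loop stops at the end of the buffer.
	 */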
	for (sb = buf; *sb; sb = se + 1) {
		se = strchrnul(sb, '\n');
		drm_printf(p, "%.*s", (int)(se - sb + 1), sb);
		if (!*se)
			break;
	}
	if (count >= buf_size)
		drm_printf(p, "\n...dropped %zd extra bytes of leak report.\n",
			   count + 1 - buf_size);
free:
	kfree(buf);
}