// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "i915_drv.h"

#include <drm/display/drm_dp_tunnel.h>

#include "intel_atomic.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_link_bw.h"

struct intel_dp_tunnel_inherited_state {
	struct drm_dp_tunnel_ref ref[I915_MAX_PIPES];
};

/**
 * intel_dp_tunnel_disconnect - Disconnect a DP tunnel from a port
 * @intel_dp: DP port object the tunnel is connected to
 *
 * Disconnect a DP tunnel from @intel_dp, destroying any related state. This
 * should be called after detecting a sink-disconnect event from the port.
 */
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp)
{
	drm_dp_tunnel_destroy(intel_dp->tunnel);
	intel_dp->tunnel = NULL;
}

/**
 * intel_dp_tunnel_destroy - Destroy a DP tunnel
 * @intel_dp: DP port object the tunnel is connected to
 *
 * Destroy a DP tunnel connected to @intel_dp, after disabling the BW
 * allocation mode on the tunnel. This should be called while destroying the
 * port.
 */
void intel_dp_tunnel_destroy(struct intel_dp *intel_dp)
{
	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp_tunnel_disconnect(intel_dp);
}
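
/*
 * Convert a BW value from the kB/s units used in the driver to Mb/s for
 * logging purposes, rounding up.
 */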
static int kbytes_to_mbits(int kbytes)
{
	return DIV_ROUND_UP(kbytes * 8, 1000);
}

static int get_current_link_bw(struct intel_dp *intel_dp,
			       bool *below_dprx_bw)
{
	int rate = intel_dp_max_common_rate(intel_dp);
	int lane_count = intel_dp_max_common_lane_count(intel_dp);
	int bw;

	bw = intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
	*below_dprx_bw = bw < drm_dp_max_dprx_data_rate(rate, lane_count);

	return bw;
}
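
/*
 * Update the cached state of @intel_dp's tunnel. Returns 1 if the BW
 * available via the tunnel changed in a way user space must be notified
 * about, 0 if nothing relevant changed and a negative error code on
 * failure.
 */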
static int update_tunnel_state(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool old_bw_below_dprx;
	bool new_bw_below_dprx;
	int old_bw;
	int new_bw;
	int ret;

	old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx);

	ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		return ret;
	}

	if (ret == 0 ||
	    !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
		return 0;

	intel_dp_update_sink_caps(intel_dp);

	new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx);

	/* Suppress the notification if the BW change can't affect the mode list. */
	if (old_bw_below_dprx == new_bw_below_dprx &&
	    !new_bw_below_dprx)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    encoder->base.base.id, encoder->base.name,
		    kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));

	return 1;
}

/*
 * Allocate the BW for a tunnel on a DP connector/port if the connector/port
 * was already active when detecting the tunnel. The allocated BW must be
 * freed by the next atomic modeset, which stores a reference to the tunnel
 * in intel_atomic_state::inherited_dp_tunnels and calls
 * intel_dp_tunnel_atomic_free_bw().
 */
static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc;
	int tunnel_bw = 0;
	int err;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int stream_bw = intel_dp_config_required_rate(crtc_state);

		tunnel_bw += stream_bw;

		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    crtc->base.base.id, crtc->base.name,
			    crtc->pipe,
			    kbytes_to_mbits(stream_bw), kbytes_to_mbits(tunnel_bw));
	}

	err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
	if (err) {
		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(err));

		return err;
	}

	return update_tunnel_state(intel_dp);
}

static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx)
{
	u8 pipe_mask;
	int err;

	err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (err)
		return err;

	return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
}

static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_dp_tunnel *tunnel;
	int ret;

	tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr,
				      &intel_dp->aux);
	if (IS_ERR(tunnel))
		return PTR_ERR(tunnel);

	intel_dp->tunnel = tunnel;

	ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (ret) {
		if (ret == -EOPNOTSUPP)
			return 0;

		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		/* Keep the tunnel with BWA disabled */
		return 0;
	}

	ret = allocate_initial_tunnel_bw(intel_dp, ctx);
	if (ret < 0)
		intel_dp_tunnel_destroy(intel_dp);

	return ret;
}

/**
 * intel_dp_tunnel_detect - Detect a DP tunnel on a port
 * @intel_dp: DP port object
 * @ctx: lock context acquired by the connector detection handler
 *
 * Detect a DP tunnel on the @intel_dp port, enabling the BW allocation mode
 * on it if supported and allocating the BW required on an already active port.
 * The BW allocated this way must be freed by the next atomic modeset calling
 * intel_dp_tunnel_atomic_free_bw().
 *
 * If @intel_dp already has a tunnel detected on it, update the tunnel's state
 * wrt. its support for BW allocation mode and the BW available via the
 * tunnel. If the tunnel's state change requires this - for instance the
 * tunnel's group ID has changed - the tunnel will be dropped and recreated.
 *
 * Return 0 in case of success - after any tunnel has been detected and added
 * to @intel_dp - 1 in case the BW on an already existing tunnel has changed
 * in a way that requires notifying user space, or a negative error code
 * otherwise.
 */
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	if (intel_dp_is_edp(intel_dp))
		return 0;

	if (intel_dp->tunnel) {
		ret = update_tunnel_state(intel_dp);
		if (ret >= 0)
			return ret;

		/* Try to recreate the tunnel after an update error. */
		intel_dp_tunnel_destroy(intel_dp);
	}

	return detect_new_tunnel(intel_dp, ctx);
}
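
/*
 * A minimal sketch of how a connector detect handler could consume the
 * return value above (illustrative only, not the actual call site):
 *
 *	ret = intel_dp_tunnel_detect(intel_dp, ctx);
 *	if (ret == -EDEADLK)
 *		return ret;	(back off and retry with the acquire ctx)
 *	if (ret > 0)
 *		(send a hotplug event, so user space reprobes the modes)
 */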

/**
 * intel_dp_tunnel_bw_alloc_is_enabled - Query whether the BW allocation mode is enabled on a tunnel
 * @intel_dp: DP port object
 *
 * Query whether a DP tunnel is connected on @intel_dp and the BW allocation
 * mode is enabled on the tunnel.
 *
 * Returns %true if the BW allocation mode is enabled on @intel_dp.
 */
bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
{
	return drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel);
}

/**
 * intel_dp_tunnel_suspend - Suspend a DP tunnel connected on a port
 * @intel_dp: DP port object
 *
 * Suspend a DP tunnel on @intel_dp with BW allocation mode enabled on it.
 */
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp->tunnel_suspended = true;
}
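
/*
 * Sketch of the intended suspend/resume pairing (illustrative only):
 *
 *	intel_dp_tunnel_suspend(intel_dp);
 *	(... system or runtime suspend/resume of the device ...)
 *	intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);
 *
 * intel_dp_tunnel_resume() below is a no-op unless the tunnel got
 * suspended here first.
 */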

/**
 * intel_dp_tunnel_resume - Resume a DP tunnel connected on a port
 * @intel_dp: DP port object
 * @crtc_state: CRTC state
 * @dpcd_updated: whether the DPCD DPRX capabilities got updated during resume
 *
 * Resume a DP tunnel on @intel_dp with BW allocation mode enabled on it.
 */
void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool dpcd_updated)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 pipe_mask;
	int err = 0;

	if (!intel_dp->tunnel_suspended)
		return;

	intel_dp->tunnel_suspended = false;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	/*
	 * The TBT Connection Manager requires the GFX driver to read out
	 * the sink's DPRX caps to be able to service any BW requests later.
	 * During resume the caps in @intel_dp cached before suspend must not
	 * be overridden, so only do a dummy read here, unless the
	 * capabilities were already updated during resume.
	 */
	if (!dpcd_updated) {
		err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err) {
			drm_dp_tunnel_set_io_error(intel_dp->tunnel);
			goto out_err;
		}
	}

	err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (err)
		goto out_err;

	pipe_mask = 0;
	if (crtc_state) {
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

		/* TODO: Add support for MST */
		pipe_mask |= BIT(crtc->pipe);
	}

	err = allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
	if (err < 0)
		goto out_err;

	return;

out_err:
	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    ERR_PTR(err));
}

static struct drm_dp_tunnel *
get_inherited_tunnel(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	if (!state->inherited_dp_tunnels)
		return NULL;

	return state->inherited_dp_tunnels->ref[crtc->pipe].tunnel;
}

static int
add_inherited_tunnel(struct intel_atomic_state *state,
		     struct drm_dp_tunnel *tunnel,
		     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_dp_tunnel *old_tunnel;

	old_tunnel = get_inherited_tunnel(state, crtc);
	if (old_tunnel) {
		drm_WARN_ON(display->drm, old_tunnel != tunnel);
		return 0;
	}

	if (!state->inherited_dp_tunnels) {
		state->inherited_dp_tunnels = kzalloc(sizeof(*state->inherited_dp_tunnels),
						      GFP_KERNEL);
		if (!state->inherited_dp_tunnels)
			return -ENOMEM;
	}

	drm_dp_tunnel_ref_get(tunnel, &state->inherited_dp_tunnels->ref[crtc->pipe]);

	return 0;
}

static int check_inherited_tunnel_state(struct intel_atomic_state *state,
					struct intel_dp *intel_dp,
					const struct intel_digital_connector_state *old_conn_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->base.connector);
	struct intel_crtc *old_crtc;
	const struct intel_crtc_state *old_crtc_state;

	/*
	 * If a BWA tunnel gets detected only after the corresponding
	 * connector got enabled already - either without a BWA tunnel or
	 * with a different BWA tunnel which was removed meanwhile - the
	 * old CRTC state won't contain the state of the current tunnel.
	 * This tunnel still has a reserved BW, which needs to be released,
	 * so add the state for such inherited tunnels separately, only to
	 * this atomic state.
	 */
	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	if (!old_conn_state->base.crtc)
		return 0;

	old_crtc = to_intel_crtc(old_conn_state->base.crtc);
	old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);

	if (!old_crtc_state->hw.active ||
	    old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    old_crtc->base.base.id, old_crtc->base.name,
		    intel_dp->tunnel);

	return add_inherited_tunnel(state, intel_dp->tunnel, old_crtc);
}

/**
 * intel_dp_tunnel_atomic_cleanup_inherited_state - Free any inherited DP tunnel state
 * @state: Atomic state
 *
 * Free the inherited DP tunnel state in @state.
 */
void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	enum pipe pipe;

	if (!state->inherited_dp_tunnels)
		return;

	for_each_pipe(display, pipe)
		if (state->inherited_dp_tunnels->ref[pipe].tunnel)
			drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);

	kfree(state->inherited_dp_tunnels);
	state->inherited_dp_tunnels = NULL;
}

static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
						  struct drm_dp_tunnel *tunnel)
{
	struct intel_display *display = to_intel_display(state);
	u32 pipe_mask;
	int err;

	err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base,
							      tunnel, &pipe_mask);
	if (err)
		return err;

	drm_WARN_ON(display->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));

	return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
}

/**
 * intel_dp_tunnel_atomic_add_state_for_crtc - Add CRTC specific DP tunnel state
 * @state: Atomic state
 * @crtc: CRTC to add the tunnel state for
 *
 * Add the DP tunnel state for @crtc if the CRTC (aka DP tunnel stream) is enabled
 * via a DP tunnel.
 *
 * Return 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
					      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_dp_tunnel_state *tunnel_state;
	struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;

	if (!tunnel)
		return 0;

	tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
	if (IS_ERR(tunnel_state))
		return PTR_ERR(tunnel_state);

	return 0;
}

static int check_group_state(struct intel_atomic_state *state,
			     struct intel_dp *intel_dp,
			     struct intel_connector *connector,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->dp_tunnel_ref.tunnel)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc_state->dp_tunnel_ref.tunnel);

	return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel);
}

/**
 * intel_dp_tunnel_atomic_check_state - Check a connector's DP tunnel specific state
 * @state: Atomic state
 * @intel_dp: DP port object
 * @connector: connector using @intel_dp
 *
 * Check and add the DP tunnel atomic state for @intel_dp/@connector to
 * @state, if there is a DP tunnel detected on @intel_dp with BW allocation
 * mode enabled on it, or if @intel_dp/@connector was previously enabled via a
 * DP tunnel.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
				       struct intel_dp *intel_dp,
				       struct intel_connector *connector)
{
	const struct intel_digital_connector_state *old_conn_state =
		intel_atomic_get_old_connector_state(state, connector);
	const struct intel_digital_connector_state *new_conn_state =
		intel_atomic_get_new_connector_state(state, connector);
	int err;

	if (old_conn_state->base.crtc) {
		err = check_group_state(state, intel_dp, connector,
					to_intel_crtc(old_conn_state->base.crtc));
		if (err)
			return err;
	}

	if (new_conn_state->base.crtc &&
	    new_conn_state->base.crtc != old_conn_state->base.crtc) {
		err = check_group_state(state, intel_dp, connector,
					to_intel_crtc(new_conn_state->base.crtc));
		if (err)
			return err;
	}

	return check_inherited_tunnel_state(state, intel_dp, old_conn_state);
}

/**
 * intel_dp_tunnel_atomic_compute_stream_bw - Compute the BW required by a DP tunnel stream
 * @state: Atomic state
 * @intel_dp: DP object
 * @connector: connector using @intel_dp
 * @crtc_state: state of CRTC of the given DP tunnel stream
 *
 * Compute the required BW of the CRTC (aka DP tunnel stream), storing this BW
 * in the DP tunnel state containing the stream in @state. Before re-calculating
 * the BW requirement in @crtc_state, the old BW requirement computed by this
 * function must be cleared by calling intel_dp_tunnel_atomic_clear_stream_bw().
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
					     struct intel_dp *intel_dp,
					     const struct intel_connector *connector,
					     struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int required_rate = intel_dp_config_required_rate(crtc_state);
	int ret;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc->pipe,
		    kbytes_to_mbits(required_rate));

	ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
						 crtc->pipe, required_rate);
	if (ret < 0)
		return ret;

	drm_dp_tunnel_ref_get(intel_dp->tunnel,
			      &crtc_state->dp_tunnel_ref);

	return 0;
}

/**
 * intel_dp_tunnel_atomic_clear_stream_bw - Clear any DP tunnel stream BW requirement
 * @state: Atomic state
 * @crtc_state: state of CRTC of the given DP tunnel stream
 *
 * Clear any DP tunnel stream BW requirement set by
 * intel_dp_tunnel_atomic_compute_stream_bw().
 */
void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
					    struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->dp_tunnel_ref.tunnel)
		return;

	drm_dp_tunnel_atomic_set_stream_bw(&state->base,
					   crtc_state->dp_tunnel_ref.tunnel,
					   crtc->pipe, 0);
	drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);
}

/**
 * intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all DP tunnels in @state. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs in
 *     @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
				      struct intel_link_bw_limits *limits)
{
	u32 failed_stream_mask;
	int err;

	err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
						    &failed_stream_mask);
	if (err != -ENOSPC)
		return err;

	err = intel_link_bw_reduce_bpp(state, limits,
				       failed_stream_mask, "DP tunnel link BW");

	return err ? : -EAGAIN;
}
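
/*
 * Sketch of the recompute loop the above return convention implies for
 * the atomic check phase (illustrative, not the actual call site):
 *
 *	do {
 *		(compute the config of all CRTCs in state, using limits)
 *		ret = intel_dp_tunnel_atomic_check_link(state, &limits);
 *	} while (ret == -EAGAIN);
 */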

static void atomic_decrease_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_dp_tunnel_state *new_tunnel_state;
		struct drm_dp_tunnel *tunnel;
		int old_bw;
		int new_bw;

		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		tunnel = get_inherited_tunnel(state, crtc);
		if (!tunnel)
			tunnel = old_crtc_state->dp_tunnel_ref.tunnel;

		if (!tunnel)
			continue;

		old_bw = drm_dp_tunnel_get_allocated_bw(tunnel);

		new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
		new_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);

		if (new_bw >= old_bw)
			continue;

		drm_dp_tunnel_alloc_bw(tunnel, new_bw);
	}
}

static void queue_retry_work(struct intel_atomic_state *state,
			     struct drm_dp_tunnel *tunnel,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder;

	encoder = intel_get_crtc_new_encoder(state, crtc_state);

	if (!intel_digital_port_connected(encoder))
		return;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
		    drm_dp_tunnel_name(tunnel),
		    encoder->base.base.id,
		    encoder->base.name);

	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
}

static void atomic_increase_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_dp_tunnel_state *tunnel_state;
		struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
		int bw;

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		if (!tunnel)
			continue;

		tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);

		bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);

		if (drm_dp_tunnel_alloc_bw(tunnel, bw) != 0)
			queue_retry_work(state, tunnel, crtc_state);
	}
}

/**
 * intel_dp_tunnel_atomic_alloc_bw - Allocate the BW for all modeset tunnels
 * @state: Atomic state
 *
 * Allocate the required BW for all tunnels in @state.
 */
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
{
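	/*
	 * Decrease the allocations first, so that the BW freed up this way
	 * can be reused for the streams which need a bigger allocation,
	 * possibly within the same tunnel group.
	 */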
	atomic_decrease_bw(state);
	atomic_increase_bw(state);
}

/**
 * intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
 * @display: display device
 *
 * Initialize the DP tunnel manager. The tunnel manager will support the
 * detection/management of DP tunnels on all DP connectors, so the function
 * must be called after all these connectors have been registered.
 *
 * Return 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_mgr_init(struct intel_display *display)
{
	struct drm_dp_tunnel_mgr *tunnel_mgr;
	struct drm_connector_list_iter connector_list_iter;
	struct intel_connector *connector;
	int dp_connectors = 0;

	drm_connector_list_iter_begin(display->drm, &connector_list_iter);
	for_each_intel_connector_iter(connector, &connector_list_iter) {
		if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		dp_connectors++;
	}
	drm_connector_list_iter_end(&connector_list_iter);

	tunnel_mgr = drm_dp_tunnel_mgr_create(display->drm, dp_connectors);
	if (IS_ERR(tunnel_mgr))
		return PTR_ERR(tunnel_mgr);

	display->dp_tunnel_mgr = tunnel_mgr;

	return 0;
}
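
/*
 * Sketch of the expected init/cleanup ordering during driver probe and
 * removal (illustrative):
 *
 *	(register all connectors)
 *	err = intel_dp_tunnel_mgr_init(display);
 *	...
 *	intel_dp_tunnel_mgr_cleanup(display);
 */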

/**
 * intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
 * @display: display device
 *
 * Clean up the DP tunnel manager state.
 */
void intel_dp_tunnel_mgr_cleanup(struct intel_display *display)
{
	drm_dp_tunnel_mgr_destroy(display->dp_tunnel_mgr);
	display->dp_tunnel_mgr = NULL;
}