/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_KMS_H__
#define __MSM_KMS_H__

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"

#define MAX_PLANE	4

/* As there are different display controller blocks depending on the
 * snapdragon version, the kms support is split out and the appropriate
 * implementation is loaded at runtime.  The kms module is responsible
 * for constructing the appropriate planes/crtcs/encoders/connectors.
 */
struct msm_kms_funcs {
	/* hw initialization: */
	int (*hw_init)(struct msm_kms *kms);
	/* irq handling: */
	void (*irq_preinstall)(struct msm_kms *kms);
	int (*irq_postinstall)(struct msm_kms *kms);
	void (*irq_uninstall)(struct msm_kms *kms);
	irqreturn_t (*irq)(struct msm_kms *kms);
	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);

	/*
	 * Atomic commit handling:
	 *
	 * Note that in the case of async commits, the funcs which take
	 * a crtc_mask (ie. ->flush_commit() and ->complete_commit())
	 * might not be evenly balanced with ->prepare_commit(); however,
	 * each crtc that is affected by a ->prepare_commit() (potentially
	 * multiple times) will eventually (at the end of a vsync period)
	 * be flushed and completed.
	 *
	 * This has some implications for tracking cleanup state, for
	 * example SMP blocks to release after a commit completes.  Ie.
	 * cleanup state should also be duplicated in the various
	 * duplicate_state() methods, as the current cleanup state at
	 * ->complete_commit() time may have accumulated cleanup work
	 * from multiple commits.
	 */
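
	/*
	 * Rough sketch of how these hooks are driven for a single commit,
	 * for illustration only (the authoritative sequencing lives in
	 * msm_atomic.c):
	 *
	 *	->enable_commit()
	 *	->wait_flush(crtc_mask)       (drain any prior async flush)
	 *	->prepare_commit(state)
	 *	   ... drm_atomic_helper plane/crtc updates pushed down ...
	 *	->flush_commit(crtc_mask)     (possibly deferred to pending_timer)
	 *	->wait_flush(crtc_mask)
	 *	->complete_commit(crtc_mask)
	 *	->disable_commit()
	 */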

	/**
	 * Enable/disable power/clks needed for hw access done in other
	 * commit-related methods.
	 *
	 * If mdp4 is migrated to runpm, we could probably drop these
	 * and use runpm directly.
	 */
	void (*enable_commit)(struct msm_kms *kms);
	void (*disable_commit)(struct msm_kms *kms);

	/**
	 * Prepare for atomic commit.  This is called after any previous
	 * (async or otherwise) commit has completed.
	 */
	void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);

	/**
	 * Flush an atomic commit.  This is called after the hardware
	 * updates have already been pushed down to the affected
	 * planes/crtcs/encoders/connectors.
	 */
	void (*flush_commit)(struct msm_kms *kms, unsigned crtc_mask);

	/**
	 * Wait for any in-progress flush to complete on the specified
	 * crtcs.  This should not block if there is no in-progress
	 * commit (ie. don't just wait for a vblank), as it will also
	 * be called before ->prepare_commit() to ensure any potential
	 * "async" commit has completed.
	 */
	void (*wait_flush)(struct msm_kms *kms, unsigned crtc_mask);

	/**
	 * Clean up after commit is completed.  This is called after
	 * ->wait_flush(), to give the backend a chance to do any
	 * post-commit cleanup.
	 */
	void (*complete_commit)(struct msm_kms *kms, unsigned crtc_mask);

	/*
	 * Format handling:
	 */

	/* do format checking on formats modified through fb_cmd2 modifiers */
	int (*check_modified_format)(const struct msm_kms *kms,
			const struct msm_format *msm_fmt,
			const struct drm_mode_fb_cmd2 *cmd,
			struct drm_gem_object **bos);

	/* misc: */
	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
			struct drm_encoder *encoder);
	/* cleanup: */
	void (*destroy)(struct msm_kms *kms);

	/* snapshot: */
	void (*snapshot)(struct msm_disp_state *disp_state, struct msm_kms *kms);

#ifdef CONFIG_DEBUG_FS
	/* debugfs: */
	int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
#endif
};
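
/*
 * Illustration only (hypothetical "foo" backend, not part of this header):
 * a display controller implementation typically provides its own funcs
 * table and hands it to msm_kms_init():
 *
 *	static const struct msm_kms_funcs foo_kms_funcs = {
 *		.hw_init         = foo_hw_init,
 *		.irq_preinstall  = foo_irq_preinstall,
 *		.irq             = foo_irq,
 *		.enable_vblank   = foo_enable_vblank,
 *		.disable_vblank  = foo_disable_vblank,
 *		.enable_commit   = foo_enable_commit,
 *		.disable_commit  = foo_disable_commit,
 *		.prepare_commit  = foo_prepare_commit,
 *		.flush_commit    = foo_flush_commit,
 *		.wait_flush      = foo_wait_flush,
 *		.complete_commit = foo_complete_commit,
 *		.destroy         = foo_kms_destroy,
 *	};
 */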

struct msm_kms;

/*
 * A per-crtc timer for pending async atomic flushes.  Scheduled to expire
 * shortly before vblank to flush pending async updates.
 */
struct msm_pending_timer {
	struct msm_hrtimer_work work;
	struct kthread_worker *worker;
	struct msm_kms *kms;
	unsigned crtc_idx;
};

struct msm_kms {
	const struct msm_kms_funcs *funcs;
	struct drm_device *dev;

	/* irq number to be passed on to msm_irq_install */
	int irq;
	bool irq_requested;

	/* mapper-id used to request GEM buffer mapped for scanout: */
	struct msm_gem_address_space *aspace;

	/* disp snapshot support */
	struct kthread_worker *dump_worker;
	struct kthread_work dump_work;
	struct mutex dump_mutex;

	/*
	 * For async commit, where ->flush_commit() and the later steps
	 * happen from the crtc's pending_timer close to the end of the
	 * frame:
	 */
	struct mutex commit_lock[MAX_CRTCS];
	unsigned pending_crtc_mask;
	struct msm_pending_timer pending_timers[MAX_CRTCS];
};
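
/*
 * Rough sketch of the async path, for illustration only (the authoritative
 * flow lives in msm_atomic.c): a nonblocking commit that can be deferred
 * records its crtcs in pending_crtc_mask and arms the corresponding
 * pending_timers[] entries instead of flushing immediately.  When a timer
 * fires shortly before vblank, its worker takes commit_lock[crtc_idx],
 * clears the crtc from pending_crtc_mask, and runs the deferred
 * ->flush_commit()/->wait_flush()/->complete_commit() sequence for that
 * crtc.
 */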

static inline int msm_kms_init(struct msm_kms *kms,
		const struct msm_kms_funcs *funcs)
{
	unsigned i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
		mutex_init(&kms->commit_lock[i]);

	kms->funcs = funcs;

	for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
		ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
		if (ret)
			return ret;
	}

	return 0;
}

static inline void msm_kms_destroy(struct msm_kms *kms)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
		msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
}
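
/*
 * Illustration only (hypothetical "foo" backend, not part of this header):
 * an implementation embeds struct msm_kms and initializes the base with
 * msm_kms_init() before registering its planes/crtcs/encoders:
 *
 *	struct foo_kms {
 *		struct msm_kms base;
 *		void __iomem *mmio;
 *	};
 *
 *	static int foo_kms_hw_probe(struct foo_kms *foo)
 *	{
 *		int ret;
 *
 *		ret = msm_kms_init(&foo->base, &foo_kms_funcs);
 *		if (ret)
 *			return ret;
 *		...
 *		return 0;
 *	}
 *
 * and tears the base down again with msm_kms_destroy() from its
 * ->destroy() hook.
 */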

#define for_each_crtc_mask(dev, crtc, crtc_mask) \
	drm_for_each_crtc(crtc, dev) \
		for_each_if (drm_crtc_mask(crtc) & (crtc_mask))

#define for_each_crtc_mask_reverse(dev, crtc, crtc_mask) \
	drm_for_each_crtc_reverse(crtc, dev) \
		for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
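
/*
 * Example (illustration only): walking the crtcs named by a commit's
 * crtc_mask, e.g. from a ->flush_commit() implementation, where
 * foo_flush_crtc() is a hypothetical per-crtc helper:
 *
 *	struct drm_crtc *crtc;
 *
 *	for_each_crtc_mask(kms->dev, crtc, crtc_mask)
 *		foo_flush_crtc(kms, crtc);
 */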

int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv);
void msm_drm_kms_uninit(struct device *dev);

#endif /* __MSM_KMS_H__ */