// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <generated/utsrelease.h>

#include "msm_disp_snapshot.h"

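/*
 * Read a hardware register range into a kernel buffer, REG_DUMP_ALIGN
 * bytes (four 32-bit registers) per row.  Reads that would land past
 * aligned_len are recorded as zero, and *reg is allocated on demand.
 */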
static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *base_addr)
{
	u32 len_padded;
	u32 num_rows;
	u32 x0, x4, x8, xc;
	void __iomem *addr;
	u32 *dump_addr = NULL;
	void __iomem *end_addr;
	int i;

	len_padded = aligned_len * REG_DUMP_ALIGN;
	num_rows = aligned_len / REG_DUMP_ALIGN;

	addr = base_addr;
	end_addr = base_addr + aligned_len;

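	/*
	 * Allocate the snapshot buffer on first use; kvzalloc() can fall
	 * back to vmalloc() for large register blocks.  If the allocation
	 * fails, the registers are still read below but not stored.
	 */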
	if (!(*reg))
		*reg = kvzalloc(len_padded, GFP_KERNEL);

	if (*reg)
		dump_addr = *reg;

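	/*
	 * Walk the range one row at a time with readl_relaxed(), so no
	 * barriers are inserted between the individual register reads.
	 */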
	for (i = 0; i < num_rows; i++) {
		x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
		x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
		x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
		xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;

		if (dump_addr) {
			dump_addr[i * 4] = x0;
			dump_addr[i * 4 + 1] = x4;
			dump_addr[i * 4 + 2] = x8;
			dump_addr[i * 4 + 3] = xc;
		}

		addr += REG_DUMP_ALIGN;
	}
}

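/*
 * Print a previously captured register block through the drm_printer.
 * base_addr is only used to compute the offsets shown in the left-hand
 * column; no hardware access happens here.
 */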
static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len,
		void __iomem *base_addr, struct drm_printer *p)
{
	int i;
	void __iomem *addr;
	u32 num_rows;

	if (!dump_addr) {
		drm_printf(p, "Registers not stored\n");
		return;
	}

	addr = base_addr;
	num_rows = len / REG_DUMP_ALIGN;

	for (i = 0; i < num_rows; i++) {
		drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
				(unsigned long)(addr - base_addr),
				dump_addr[i * 4], dump_addr[i * 4 + 1],
				dump_addr[i * 4 + 2], dump_addr[i * 4 + 3]);
		addr += REG_DUMP_ALIGN;
	}
}

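/*
 * Emit the whole snapshot in devcoredump text form: a short header
 * (kernel release, module name, capture time), one section per register
 * block, and finally the duplicated atomic state, if one was captured.
 */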
void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
{
	struct msm_disp_state_block *block, *tmp;

	if (!p) {
		DRM_ERROR("invalid drm printer\n");
		return;
	}

	drm_printf(p, "---\n");
	drm_printf(p, "kernel: " UTS_RELEASE "\n");
	drm_printf(p, "module: " KBUILD_MODNAME "\n");
	drm_printf(p, "dpu devcoredump\n");
	drm_printf(p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);

	list_for_each_entry_safe(block, tmp, &state->blocks, node) {
		drm_printf(p, "====================%s================\n", block->name);
		msm_disp_state_print_regs(block->state, block->size, block->base_addr, p);
	}

	drm_printf(p, "===================dpu drm state================\n");

	if (state->atomic_state)
		drm_atomic_print_new_state(state->atomic_state, p);
}

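/*
 * Record the capture timestamp and duplicate the current atomic state so
 * it can be printed later alongside the register dump.
 */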
static void msm_disp_capture_atomic_state(struct msm_disp_state *disp_state)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;

	ktime_get_real_ts64(&disp_state->time);

	ddev = disp_state->drm_dev;

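	/*
	 * Take every modeset lock so the duplicated state is self-consistent;
	 * drm_modeset_backoff() handles the usual deadlock-avoidance retry.
	 */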
	drm_modeset_acquire_init(&ctx, 0);

	while (drm_modeset_lock_all_ctx(ddev, &ctx) != 0)
		drm_modeset_backoff(&ctx);

	disp_state->atomic_state = drm_atomic_helper_duplicate_state(ddev,
			&ctx);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

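/*
 * Top-level capture: snapshot each registered DP and DSI interface, let
 * the KMS driver dump its own registers, then duplicate the atomic state.
 */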
void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
{
	struct msm_drm_private *priv;
	struct drm_device *drm_dev;
	struct msm_kms *kms;
	int i;

	drm_dev = disp_state->drm_dev;
	priv = drm_dev->dev_private;
	kms = priv->kms;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (!priv->dp[i])
			continue;

		msm_dp_snapshot(disp_state, priv->dp[i]);
	}

	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		if (!priv->dsi[i])
			continue;

		msm_dsi_snapshot(disp_state, priv->dsi[i]);
	}

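	/* Let the KMS backend add its own register blocks, if supported. */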
	if (kms->funcs->snapshot)
		kms->funcs->snapshot(disp_state, kms);

	msm_disp_capture_atomic_state(disp_state);
}

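/*
 * Release everything the snapshot allocated: the duplicated atomic state,
 * every register block, and the state struct itself.  The void *data
 * signature lets this be used directly as a devcoredump free callback.
 */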
void msm_disp_state_free(void *data)
{
	struct msm_disp_state *disp_state = data;
	struct msm_disp_state_block *block, *tmp;

	if (disp_state->atomic_state) {
		drm_atomic_state_put(disp_state->atomic_state);
		disp_state->atomic_state = NULL;
	}

	list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
		list_del(&block->node);
		kvfree(block->state);
		kfree(block);
	}

	kfree(disp_state);
}

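/*
 * Capture one named register block: round the length up to REG_DUMP_ALIGN,
 * read the registers into a freshly allocated buffer, and queue the block
 * on disp_state->blocks for later printing.  The block name is built from
 * the printf-style fmt arguments.
 */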
void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
		void __iomem *base_addr, const char *fmt, ...)
{
	struct msm_disp_state_block *new_blk;
	struct va_format vaf;
	va_list va;

	new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
	if (!new_blk)
		return;

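	/* Format the caller-supplied block name via the %pV va_format helper. */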
	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;
	snprintf(new_blk->name, sizeof(new_blk->name), "%pV", &vaf);

	va_end(va);

	INIT_LIST_HEAD(&new_blk->node);
	new_blk->size = ALIGN(len, REG_DUMP_ALIGN);
	new_blk->base_addr = base_addr;

	msm_disp_state_dump_regs(&new_blk->state, new_blk->size, base_addr);
	list_add_tail(&new_blk->node, &disp_state->blocks);
}