// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/component.h>

#include <drm/intel/i915_component.h>
#include <drm/intel/i915_gsc_proxy_mei_interface.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "intel_gsc_proxy.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
#include "i915_drv.h"
#include "i915_reg.h"

/*
 * GSC proxy:
 * The GSC uC needs to communicate with the CSME to perform certain operations.
 * Since the GSC can't perform this communication directly on platforms where it
 * is integrated in GT, i915 needs to transfer the messages from GSC to CSME
 * and back. i915 must manually start the proxy flow after the GSC is loaded to
 * signal to GSC that we're ready to handle its messages and allow it to query
 * its init data from CSME; GSC will then trigger an HECI2 interrupt if it needs
 * to send messages to CSME again.
 * The proxy flow is as follows:
 * 1 - i915 submits a request to GSC asking for the message to CSME
 * 2 - GSC replies with the proxy header + payload for CSME
 * 3 - i915 sends the reply from GSC as-is to CSME via the mei proxy component
 * 4 - CSME replies with the proxy header + payload for GSC
 * 5 - i915 submits a request to GSC with the reply from CSME
 * 6 - GSC replies either with a new header + payload (same as step 2, so we
 *     restart from there) or with an end message.
 */
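
/*
 * In the code below, proxy_query() drives this loop: proxy_send_to_gsc()
 * covers the submissions to the GSC (steps 1 and 5) and proxy_send_to_csme()
 * relays the GSC message to the CSME and fetches its reply via the mei
 * component (steps 3 and 4).
 */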

/*
 * The component should load quite quickly in most cases, but it could take
 * a bit. Use a very large timeout just to cover the worst-case scenario.
 */
#define GSC_PROXY_INIT_TIMEOUT_MS 20000

/* the protocol supports up to 32K in each direction */
#define GSC_PROXY_BUFFER_SIZE		SZ_32K
#define GSC_PROXY_CHANNEL_SIZE		(GSC_PROXY_BUFFER_SIZE * 2)
#define GSC_PROXY_MAX_MSG_SIZE		(GSC_PROXY_BUFFER_SIZE - sizeof(struct intel_gsc_mtl_header))
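
/*
 * The proxy channel is a single allocation of GSC_PROXY_CHANNEL_SIZE: the
 * first GSC_PROXY_BUFFER_SIZE bytes back the to_gsc buffer (input for the
 * GSC) and the second GSC_PROXY_BUFFER_SIZE bytes back the to_csme buffer
 * (the GSC reply that is forwarded to the CSME). See proxy_channel_alloc().
 */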

/* FW-defined proxy header */
struct intel_gsc_proxy_header {
	/*
	 * hdr:
	 * Bits 0-7: type of the proxy message (see enum intel_gsc_proxy_type)
	 * Bits 8-15: rsvd
	 * Bits 16-31: length in bytes of the payload following the proxy header
	 */
	u32 hdr;
#define GSC_PROXY_TYPE		 GENMASK(7, 0)
#define GSC_PROXY_PAYLOAD_LENGTH GENMASK(31, 16)

	u32 source;		/* Source of the Proxy message */
	u32 destination;	/* Destination of the Proxy message */
#define GSC_PROXY_ADDRESSING_KMD  0x10000
#define GSC_PROXY_ADDRESSING_GSC  0x20000
#define GSC_PROXY_ADDRESSING_CSME 0x30000

	u32 status;		/* Command status */
} __packed;

/* FW-defined proxy types */
enum intel_gsc_proxy_type {
	GSC_PROXY_MSG_TYPE_PROXY_INVALID = 0,
	GSC_PROXY_MSG_TYPE_PROXY_QUERY = 1,
	GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD = 2,
	GSC_PROXY_MSG_TYPE_PROXY_END = 3,
	GSC_PROXY_MSG_TYPE_PROXY_NOTIFICATION = 4,
};

struct gsc_proxy_msg {
	struct intel_gsc_mtl_header header;
	struct intel_gsc_proxy_header proxy_header;
} __packed;

static int proxy_send_to_csme(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct i915_gsc_proxy_component *comp = gsc->proxy.component;
	struct intel_gsc_mtl_header *hdr;
	void *in = gsc->proxy.to_csme;
	void *out = gsc->proxy.to_gsc;
	u32 in_size;
	int ret;

	/* the message sent to the CSME contains only the proxy part, not the MTL header */
	hdr = in;
	in += sizeof(struct intel_gsc_mtl_header);
	out += sizeof(struct intel_gsc_mtl_header);

	in_size = hdr->message_size - sizeof(struct intel_gsc_mtl_header);

	/* the message must contain at least the proxy header */
	if (in_size < sizeof(struct intel_gsc_proxy_header) ||
	    in_size > GSC_PROXY_MAX_MSG_SIZE) {
		gt_err(gt, "Invalid CSME message size: %u\n", in_size);
		return -EINVAL;
	}

	ret = comp->ops->send(comp->mei_dev, in, in_size);
	if (ret < 0) {
		gt_err(gt, "Failed to send CSME message\n");
		return ret;
	}

	ret = comp->ops->recv(comp->mei_dev, out, GSC_PROXY_MAX_MSG_SIZE);
	if (ret < 0) {
		gt_err(gt, "Failed to receive CSME message\n");
		return ret;
	}

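	/* on success, ret is the number of bytes received back from the CSME */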
	return ret;
}

static int proxy_send_to_gsc(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	u32 *marker = gsc->proxy.to_csme; /* first dw of the reply header */
	u64 addr_in = i915_ggtt_offset(gsc->proxy.vma);
	u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE;
	u32 size = ((struct gsc_proxy_msg *)gsc->proxy.to_gsc)->header.message_size;
	int err;

	/* the message must contain at least the gsc and proxy headers */
	if (size < sizeof(struct gsc_proxy_msg) || size > GSC_PROXY_BUFFER_SIZE) {
		gt_err(gt, "Invalid GSC proxy message size: %u\n", size);
		return -EINVAL;
	}

	/* clear the message marker */
	*marker = 0;

	/* make sure the marker write is flushed */
	wmb();

	/* send the request */
	err = intel_gsc_uc_heci_cmd_submit_packet(gsc, addr_in, size,
						  addr_out, GSC_PROXY_BUFFER_SIZE);

	if (!err) {
		/* wait for the reply to show up */
		err = wait_for(*marker != 0, 300);
		if (err)
			gt_err(gt, "Failed to get a proxy reply from gsc\n");
	}

	return err;
}

static int validate_proxy_header(struct intel_gsc_proxy_header *header,
				 u32 source, u32 dest)
{
	u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
	u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
	int ret = 0;

	if (header->destination != dest || header->source != source) {
		ret = -ENOEXEC;
		goto fail;
	}

	switch (type) {
	case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
		if (length > 0)
			break;
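		/* a PAYLOAD message with a zero-length payload is treated as invalid */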
		fallthrough;
	case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
		ret = -EIO;
		goto fail;
	default:
		break;
	}

fail:
	return ret;
}

static int proxy_query(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct gsc_proxy_msg *to_gsc = gsc->proxy.to_gsc;
	struct gsc_proxy_msg *to_csme = gsc->proxy.to_csme;
	int ret;

	intel_gsc_uc_heci_cmd_emit_mtl_header(&to_gsc->header,
					      HECI_MEADDRESS_PROXY,
					      sizeof(struct gsc_proxy_msg),
					      0);

	to_gsc->proxy_header.hdr =
		FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
		FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0);

	to_gsc->proxy_header.source = GSC_PROXY_ADDRESSING_KMD;
	to_gsc->proxy_header.destination = GSC_PROXY_ADDRESSING_GSC;
	to_gsc->proxy_header.status = 0;

	while (1) {
		/* clear the GSC response header space */
		memset(gsc->proxy.to_csme, 0, sizeof(struct gsc_proxy_msg));

		/* send proxy message to GSC */
		ret = proxy_send_to_gsc(gsc);
		if (ret) {
			gt_err(gt, "failed to send proxy message to GSC! %d\n", ret);
			goto proxy_error;
		}

		/* stop if this was the last message */
		if (FIELD_GET(GSC_PROXY_TYPE, to_csme->proxy_header.hdr) ==
		    GSC_PROXY_MSG_TYPE_PROXY_END)
			break;

		/* make sure the GSC-to-CSME proxy header is sane */
		ret = validate_proxy_header(&to_csme->proxy_header,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_ADDRESSING_CSME);
		if (ret) {
			gt_err(gt, "invalid GSC to CSME proxy header! %d\n", ret);
			goto proxy_error;
		}

		/* send the GSC message to the CSME */
		ret = proxy_send_to_csme(gsc);
		if (ret < 0) {
			gt_err(gt, "failed to send proxy message to CSME! %d\n", ret);
			goto proxy_error;
		}

		/* update the GSC message size with the returned value from CSME */
		to_gsc->header.message_size = ret + sizeof(struct intel_gsc_mtl_header);

		/* make sure the CSME-to-GSC proxy header is sane */
		ret = validate_proxy_header(&to_gsc->proxy_header,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_ADDRESSING_GSC);
		if (ret) {
			gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret);
			goto proxy_error;
		}
	}

proxy_error:
	return ret < 0 ? ret : 0;
}

int intel_gsc_proxy_request_handler(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	int err;

	if (!gsc->proxy.component_added)
		return -ENODEV;

	assert_rpm_wakelock_held(gt->uncore->rpm);

	/*
	 * This can be queued as soon as the GSC is loaded, which may be before
	 * the mei component has bound, so wait for the component here.
	 */
	err = wait_for(gsc->proxy.component, GSC_PROXY_INIT_TIMEOUT_MS);
	if (err) {
		gt_err(gt, "GSC proxy component didn't bind within the expected timeout\n");
		return -EIO;
	}

	mutex_lock(&gsc->proxy.mutex);
	if (!gsc->proxy.component) {
		gt_err(gt, "GSC proxy worker called without the component being bound!\n");
		err = -EIO;
	} else {
		/*
		 * write the status bit to clear it and allow new proxy
		 * interrupts to be generated while we handle the current
		 * request, but be sure not to write the reset bit
		 */
		intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
				 HECI_H_CSR_RST, HECI_H_CSR_IS);
		err = proxy_query(gsc);
	}
	mutex_unlock(&gsc->proxy.mutex);
	return err;
}

void intel_gsc_proxy_irq_handler(struct intel_gsc_uc *gsc, u32 iir)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);

	if (unlikely(!iir))
		return;

	lockdep_assert_held(gt->irq_lock);

	if (!gsc->proxy.component) {
		gt_err(gt, "GSC proxy irq received without the component being bound!\n");
		return;
	}

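	/* the actual proxy handling is deferred to the GSC worker */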
	gsc->gsc_work_actions |= GSC_ACTION_SW_PROXY;
	queue_work(gsc->wq, &gsc->work);
}

static int i915_gsc_proxy_component_bind(struct device *i915_kdev,
					 struct device *mei_kdev, void *data)
{
	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
	struct intel_gt *gt = i915->media_gt;
	struct intel_gsc_uc *gsc = &gt->uc.gsc;
	intel_wakeref_t wakeref;

	/* enable HECI2 IRQs */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
				 HECI_H_CSR_RST, HECI_H_CSR_IE);

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = data;
	gsc->proxy.component->mei_dev = mei_kdev;
	mutex_unlock(&gsc->proxy.mutex);
	gt_dbg(gt, "GSC proxy mei component bound\n");

	return 0;
}

static void i915_gsc_proxy_component_unbind(struct device *i915_kdev,
					    struct device *mei_kdev, void *data)
{
	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
	struct intel_gt *gt = i915->media_gt;
	struct intel_gsc_uc *gsc = &gt->uc.gsc;
	intel_wakeref_t wakeref;

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = NULL;
	mutex_unlock(&gsc->proxy.mutex);

	/* disable HECI2 IRQs */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
				 HECI_H_CSR_IE | HECI_H_CSR_RST, 0);
	gt_dbg(gt, "GSC proxy mei component unbound\n");
}

static const struct component_ops i915_gsc_proxy_component_ops = {
	.bind = i915_gsc_proxy_component_bind,
	.unbind = i915_gsc_proxy_component_unbind,
};

static int proxy_channel_alloc(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct i915_vma *vma;
	void *vaddr;
	int err;

	err = intel_guc_allocate_and_map_vma(gt_to_guc(gt),
					     GSC_PROXY_CHANNEL_SIZE,
					     &vma, &vaddr);
	if (err)
		return err;

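	/* split the single allocation into the two directional buffers */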
	gsc->proxy.vma = vma;
	gsc->proxy.to_gsc = vaddr;
	gsc->proxy.to_csme = vaddr + GSC_PROXY_BUFFER_SIZE;

	return 0;
}

static void proxy_channel_free(struct intel_gsc_uc *gsc)
{
	if (!gsc->proxy.vma)
		return;

	gsc->proxy.to_gsc = NULL;
	gsc->proxy.to_csme = NULL;
	i915_vma_unpin_and_release(&gsc->proxy.vma, I915_VMA_RELEASE_MAP);
}

void intel_gsc_proxy_fini(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct drm_i915_private *i915 = gt->i915;

	if (fetch_and_zero(&gsc->proxy.component_added))
		component_del(i915->drm.dev, &i915_gsc_proxy_component_ops);

	proxy_channel_free(gsc);
}

int intel_gsc_proxy_init(struct intel_gsc_uc *gsc)
{
	int err;
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct drm_i915_private *i915 = gt->i915;

	mutex_init(&gsc->proxy.mutex);

	if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) {
		gt_info(gt, "can't init GSC proxy due to missing mei component\n");
		return -ENODEV;
	}

	err = proxy_channel_alloc(gsc);
	if (err)
		return err;

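	/*
	 * Register our side of the GSC proxy component; the mei side binds
	 * asynchronously via i915_gsc_proxy_component_bind() above.
	 */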
	err = component_add_typed(i915->drm.dev, &i915_gsc_proxy_component_ops,
				  I915_COMPONENT_GSC_PROXY);
	if (err < 0) {
		gt_err(gt, "Failed to add GSC_PROXY component (%d)\n", err);
		goto out_free;
	}

	gsc->proxy.component_added = true;

	return 0;

out_free:
	proxy_channel_free(gsc);
	return err;
}