// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 Cortex-M4 Remote Processor(s) driver
 *
 * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
 *	Hari Nagalla <hnagalla@ti.com>
 */

#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"

#define K3_M4_IRAM_DEV_ADDR 0x00000
#define K3_M4_DRAM_DEV_ADDR 0x30000

/**
 * struct k3_m4_rproc_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address of the memory region from remote processor view
 * @size: Size of the memory region
 */
struct k3_m4_rproc_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};

/**
 * struct k3_m4_rproc_mem_data - memory definitions for a remote processor
 * @name: name for this memory entry
 * @dev_addr: device address for the memory entry
 */
struct k3_m4_rproc_mem_data {
	const char *name;
	const u32 dev_addr;
};

/**
 * struct k3_m4_rproc - k3 remote processor driver structure
 * @dev: cached device pointer
 * @mem: internal memory regions data
 * @num_mems: number of internal memory regions
 * @rmem: reserved memory regions data
 * @num_rmems: number of reserved memory regions
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 */
struct k3_m4_rproc {
	struct device *dev;
	struct k3_m4_rproc_mem *mem;
	int num_mems;
	struct k3_m4_rproc_mem *rmem;
	int num_rmems;
	struct reset_control *reset;
	struct ti_sci_proc *tsp;
	const struct ti_sci_handle *ti_sci;
	u32 ti_sci_id;
	struct mbox_chan *mbox;
	struct mbox_client client;
};

/**
 * k3_m4_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the K3 mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let the remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */
static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data)
{
	struct device *dev = client->dev;
	struct rproc *rproc = dev_get_drvdata(dev);
	u32 msg = (u32)(uintptr_t)(data);

	dev_dbg(dev, "mbox msg: 0x%x\n", msg);

	switch (msg) {
	case RP_MBOX_CRASH:
		/*
		 * remoteproc detected an exception, but error recovery is not
		 * supported. So, just log this for now.
		 */
		dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
		break;
	case RP_MBOX_ECHO_REPLY:
		dev_info(dev, "received echo reply from %s\n", rproc->name);
		break;
	default:
		/* silently handle all other valid messages */
		if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
			return;
		if (msg > rproc->max_notifyid) {
			dev_dbg(dev, "dropping unknown message 0x%x", msg);
			return;
		}
		/* msg contains the index of the triggered vring */
		if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
			dev_dbg(dev, "no message was found in vqid %d\n", msg);
	}
}
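
/*
 * Note on the message demultiplexing above: the out-of-band RP_MBOX_* codes
 * are defined in omap_remoteproc.h and are deliberately large sentinel
 * values, while virtqueue indices are small (bounded by rproc->max_notifyid),
 * so a simple range check is enough to tell the two apart.
 */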

/*
 * Kick the remote processor to notify it about pending unprocessed messages.
 * The vqid is not actually used, as the kick is performed through a simulated
 * GPIO (a bit in an IPC interrupt-triggering register); the remote processor
 * is expected to process both its Tx and Rx virtqueues.
 */
static void k3_m4_rproc_kick(struct rproc *rproc, int vqid)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	u32 msg = (u32)vqid;
	int ret;

	/*
	 * Send the index of the triggered virtqueue in the mailbox payload.
	 * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
	 * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message, status = %d\n",
			ret);
}

static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	/*
	 * Ping the remote processor; this is only a sanity check for now,
	 * with no functional effect whatsoever.
	 *
	 * Note that the reply will _not_ arrive immediately: this message
	 * will wait in the mailbox fifo until the remote processor is booted.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
	if (ret < 0) {
		dev_err(dev, "mbox_send_message failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * The M4 cores have a local reset that affects only the CPU, and a
 * generic module reset that powers on the device and allows the internal
 * memories to be accessed while the local reset is asserted. This function is
 * used to release the global reset on remote cores to allow loading into the
 * internal RAMs. The .prepare() ops is invoked by the remoteproc core before
 * any firmware loading, and is followed by the .start() ops after loading to
 * actually let the remote cores run.
 */
static int k3_m4_rproc_prepare(struct rproc *rproc)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	/* If the core is already running, there is no need to deassert the module reset */
	if (rproc->state == RPROC_DETACHED)
		return 0;

	/*
	 * Ensure the local reset is asserted so the core doesn't
	 * execute bogus code when the module reset is released.
	 */
	ret = reset_control_assert(kproc->reset);
	if (ret) {
		dev_err(dev, "could not assert local reset\n");
		return ret;
	}

	ret = reset_control_status(kproc->reset);
	if (ret <= 0) {
		/* a status of 0 means the reset is still deasserted */
		dev_err(dev, "local reset still not asserted\n");
		return ret ? ret : -ENODEV;
	}

	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
		return ret;
	}

	return 0;
}
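
/*
 * Reset sequencing summary for remoteproc mode, tying together the ops in
 * this file:
 *
 *   .prepare()   - assert the local reset, then release the module reset
 *                  (TI-SCI get_device) so the internal RAMs can be loaded
 *                  while the core stays halted
 *   .start()     - deassert the local reset; the core starts executing
 *   .stop()      - re-assert the local reset; the core halts
 *   .unprepare() - re-assert the module reset (TI-SCI put_device) to
 *                  complete the power-down
 */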

/*
 * This function implements the .unprepare() ops and performs the complementary
 * operations to those of the .prepare() ops. The function is used to assert
 * the global reset on applicable cores. This completes the second portion of
 * powering down the remote core. The cores themselves are only halted in the
 * .stop() callback through the local reset, and the .unprepare() ops is
 * invoked by the remoteproc core after the remoteproc is stopped to balance
 * the global reset.
 */
static int k3_m4_rproc_unprepare(struct rproc *rproc)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	/* If the core is going to be detached, do not assert the module reset */
	if (rproc->state == RPROC_ATTACHED)
		return 0;

	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "module-reset assert failed\n");
		return ret;
	}

	return 0;
}

/*
 * This function implements the .get_loaded_rsc_table() callback and is used
 * to provide the resource table for a booted remote processor in IPC-only
 * mode. The remote processor firmwares follow a design-by-contract approach
 * and are expected to have the resource table at the base of the DDR region
 * reserved for firmware usage. This provides flexibility for the remote
 * processor to be booted by different bootloaders that may or may not have the
 * ability to publish the resource table address and size through a DT
 * property.
 */
static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc,
							 size_t *rsc_table_sz)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;

	if (!kproc->rmem[0].cpu_addr) {
		dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * NOTE: The resource table size is currently hard-coded to a maximum
	 * of 256 bytes. The most common resource table usage for K3 firmwares
	 * is to only have the vdev resource entry and an optional trace entry.
	 * The exact size could be computed based on the resource table address,
	 * but the hard-coded value suffices to support the IPC-only mode.
	 */
	*rsc_table_sz = 256;
	return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
}
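
/*
 * A sketch (an illustrative assumption, not mandated by this driver) of a
 * minimal IPC-only resource table as expected at the base of reserved memory
 * region 1; such a table fits well within the 256-byte window returned above:
 *
 *	struct resource_table {
 *		u32 ver;		// 1
 *		u32 num;		// 1 entry
 *		u32 reserved[2];	// 0, 0
 *		u32 offset[1];		// offset of the vdev entry header
 *	};
 *	// ... followed by a struct fw_rsc_hdr with type RSC_VDEV and a
 *	// struct fw_rsc_vdev (plus its fw_rsc_vdev_vring entries)
 *	// describing the rpmsg virtqueues.
 */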

/*
 * Custom function to translate a remote processor device address (internal
 * RAMs only) to a kernel virtual address. The remote processors can access
 * their RAMs either at an internal address visible only from a remote
 * processor, or at the SoC-level bus address. Both address views need to be
 * checked during translation. The translated addresses can be used either by
 * the remoteproc core for loading (when using the kernel remoteproc loader),
 * or by any rpmsg bus drivers.
 */
static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (len == 0)
		return NULL;

	for (i = 0; i < kproc->num_mems; i++) {
		bus_addr = kproc->mem[i].bus_addr;
		dev_addr = kproc->mem[i].dev_addr;
		size = kproc->mem[i].size;

		/* handle M4-view addresses */
		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		/* handle SoC-view addresses */
		if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
			offset = da - bus_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* handle static DDR reserved memory regions */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	return NULL;
}
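
/*
 * Worked example for the translation above, using hypothetical numbers: if
 * the "dram" region were mapped at bus address 0x05040000 with dev_addr
 * K3_M4_DRAM_DEV_ADDR (0x30000) and size 0x10000, then da = 0x30100 would
 * match the M4 view and translate to cpu_addr + 0x100, while da = 0x05040100
 * would match the SoC view and yield the same kernel address.
 */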

static int k3_m4_rproc_of_get_memories(struct platform_device *pdev,
				       struct k3_m4_rproc *kproc)
{
	static const char * const mem_names[] = { "iram", "dram" };
	static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR };
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems;
	int i;

	num_mems = ARRAY_SIZE(mem_names);
	kproc->mem = devm_kcalloc(kproc->dev, num_mems,
				  sizeof(*kproc->mem), GFP_KERNEL);
	if (!kproc->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   mem_names[i]);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				mem_names[i]);
			return -EINVAL;
		}
		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res),
					     dev_name(dev))) {
			dev_err(dev, "could not request %s region for resource\n",
				mem_names[i]);
			return -EBUSY;
		}

		kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							 resource_size(res));
		if (!kproc->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n",
				mem_names[i]);
			return -ENOMEM;
		}
		kproc->mem[i].bus_addr = res->start;
		kproc->mem[i].dev_addr = mem_addrs[i];
		kproc->mem[i].size = resource_size(res);

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			mem_names[i], &kproc->mem[i].bus_addr,
			kproc->mem[i].size, kproc->mem[i].cpu_addr,
			kproc->mem[i].dev_addr);
	}
	kproc->num_mems = num_mems;

	return 0;
}

static void k3_m4_rproc_dev_mem_release(void *data)
{
	struct device *dev = data;

	of_reserved_mem_device_release(dev);
}

static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
{
	struct device *dev = kproc->dev;
	struct device_node *np = dev->of_node;
	struct device_node *rmem_np;
	struct reserved_mem *rmem;
	int num_rmems;
	int ret, i;

	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
	if (num_rmems < 0) {
		dev_err(dev, "device does not have reserved memory regions (%d)\n",
			num_rmems);
		return -EINVAL;
	}
	if (num_rmems < 2) {
		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
			num_rmems);
		return -EINVAL;
	}

	/* use reserved memory region 0 for vring DMA allocations */
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret) {
		dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
		return ret;
	}
	ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev);
	if (ret)
		return ret;

	num_rmems--;
	kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
	if (!kproc->rmem)
		return -ENOMEM;

	/* use the remaining reserved memory regions for static carveouts */
	for (i = 0; i < num_rmems; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
		if (!rmem_np)
			return -EINVAL;

		rmem = of_reserved_mem_lookup(rmem_np);
		if (!rmem) {
			of_node_put(rmem_np);
			return -EINVAL;
		}
		of_node_put(rmem_np);

		kproc->rmem[i].bus_addr = rmem->base;
		/* 64-bit address regions currently not supported */
		kproc->rmem[i].dev_addr = (u32)rmem->base;
		kproc->rmem[i].size = rmem->size;
		kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
				i + 1, &rmem->base, &rmem->size);
			return -ENOMEM;
		}

		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			i + 1, &kproc->rmem[i].bus_addr,
			kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
			kproc->rmem[i].dev_addr);
	}
	kproc->num_rmems = num_rmems;

	return 0;
}
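
/*
 * Summary of the memory-region ordering consumed above: entry 0 is the DMA
 * pool used for vring and virtio buffer allocations, and every subsequent
 * entry is a static carveout. In IPC-only mode, the first carveout (entry 1)
 * must additionally hold the resource table at its base, as assumed by
 * k3_m4_get_loaded_rsc_table().
 */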

static void k3_m4_release_tsp(void *data)
{
	struct ti_sci_proc *tsp = data;

	ti_sci_proc_release(tsp);
}

/*
 * Power up the M4 remote processor.
 *
 * This function will be invoked only after the firmware for this rproc
 * was loaded, parsed successfully, and all of its resource requirements
 * were met. This callback is invoked only in remoteproc mode.
 */
static int k3_m4_rproc_start(struct rproc *rproc)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	ret = k3_m4_rproc_ping_mbox(kproc);
	if (ret)
		return ret;

	ret = reset_control_deassert(kproc->reset);
	if (ret) {
		dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Stop the M4 remote processor.
 *
 * This function puts the M4 processor into reset, and finishes processing
 * of any pending messages. This callback is invoked only in remoteproc mode.
 */
static int k3_m4_rproc_stop(struct rproc *rproc)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	ret = reset_control_assert(kproc->reset);
	if (ret) {
		dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Attach to a running M4 remote processor (IPC-only mode)
 *
 * The remote processor is already booted, so there is no need to issue any
 * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only
 * mode.
 */
static int k3_m4_rproc_attach(struct rproc *rproc)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	int ret;

	ret = k3_m4_rproc_ping_mbox(kproc);
	if (ret)
		return ret;

	return 0;
}

/*
 * Detach from a running M4 remote processor (IPC-only mode)
 *
 * This rproc detach callback performs the opposite operation to the attach
 * callback; the M4 core is not stopped and will be left to continue running
 * its booted firmware. This callback is invoked only in IPC-only mode.
 */
static int k3_m4_rproc_detach(struct rproc *rproc)
{
	return 0;
}

static const struct rproc_ops k3_m4_rproc_ops = {
	.prepare = k3_m4_rproc_prepare,
	.unprepare = k3_m4_rproc_unprepare,
	.start = k3_m4_rproc_start,
	.stop = k3_m4_rproc_stop,
	.attach = k3_m4_rproc_attach,
	.detach = k3_m4_rproc_detach,
	.kick = k3_m4_rproc_kick,
	.da_to_va = k3_m4_rproc_da_to_va,
	.get_loaded_rsc_table = k3_m4_get_loaded_rsc_table,
};

static int k3_m4_rproc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct k3_m4_rproc *kproc;
	struct rproc *rproc;
	const char *fw_name;
	bool r_state = false;
	bool p_state = false;
	int ret;

	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
	if (ret)
		return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");

	rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_m4_rproc_ops, fw_name,
				 sizeof(*kproc));
	if (!rproc)
		return -ENOMEM;

	rproc->has_iommu = false;
	rproc->recovery_disabled = true;
	kproc = rproc->priv;
	kproc->dev = dev;
	platform_set_drvdata(pdev, rproc);

	kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(kproc->ti_sci))
		return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
				     "failed to get ti-sci handle\n");

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &kproc->ti_sci_id);
	if (ret)
		return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");

	kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(kproc->reset))
		return dev_err_probe(dev, PTR_ERR(kproc->reset), "failed to get reset\n");

	kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
	if (IS_ERR(kproc->tsp))
		return dev_err_probe(dev, PTR_ERR(kproc->tsp),
				     "failed to construct ti-sci proc control\n");

	ret = ti_sci_proc_request(kproc->tsp);
	if (ret < 0)
		return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
	ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp);
	if (ret)
		return ret;

	ret = k3_m4_rproc_of_get_memories(pdev, kproc);
	if (ret)
		return ret;

	ret = k3_m4_reserved_mem_init(kproc);
	if (ret)
		return dev_err_probe(dev, ret, "reserved memory init failed\n");

	ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
					       &r_state, &p_state);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to get initial state, mode cannot be determined\n");

	/* configure devices for either remoteproc or IPC-only mode */
	if (p_state) {
		rproc->state = RPROC_DETACHED;
		dev_info(dev, "configured M4F for IPC-only mode\n");
	} else {
		dev_info(dev, "configured M4F for remoteproc mode\n");
	}

	kproc->client.dev = dev;
	kproc->client.tx_done = NULL;
	kproc->client.rx_callback = k3_m4_rproc_mbox_callback;
	kproc->client.tx_block = false;
	kproc->client.knows_txdone = false;
	kproc->mbox = mbox_request_channel(&kproc->client, 0);
	if (IS_ERR(kproc->mbox))
		return dev_err_probe(dev, PTR_ERR(kproc->mbox),
				     "mbox_request_channel failed\n");

	ret = devm_rproc_add(dev, rproc);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to register device with remoteproc core\n");

	return 0;
}

static const struct of_device_id k3_m4_of_match[] = {
	{ .compatible = "ti,am64-m4fss", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_m4_of_match);
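
/*
 * A hypothetical device tree node this driver could bind against (addresses
 * and specifier values are illustrative assumptions only; the ti,am64-m4fss
 * binding document is the authoritative reference):
 *
 *	m4fss: m4fss@5000000 {
 *		compatible = "ti,am64-m4fss";
 *		reg = <0x00 0x05000000 0x00 0x30000>,
 *		      <0x00 0x05040000 0x00 0x10000>;
 *		reg-names = "iram", "dram";
 *		ti,sci = <&dmsc>;
 *		ti,sci-dev-id = <9>;
 *		ti,sci-proc-ids = <0x18 0xff>;
 *		resets = <&k3_reset 9 1>;
 *		firmware-name = "am64-m4f0_0-fw";
 *		mboxes = <&mailbox0_cluster0 &mbox_m4_0>;
 *		memory-region = <&m4f_dma_memory_region>,
 *				<&m4f_memory_region>;
 *	};
 */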

static struct platform_driver k3_m4_rproc_driver = {
	.probe = k3_m4_rproc_probe,
	.driver = {
		.name = "k3-m4-rproc",
		.of_match_table = k3_m4_of_match,
	},
};
module_platform_driver(k3_m4_rproc_driver);

MODULE_AUTHOR("Hari Nagalla <hnagalla@ti.com>");
MODULE_DESCRIPTION("TI K3 M4 Remoteproc driver");
MODULE_LICENSE("GPL");