// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Renesas RZ/G2L CRU
 *
 * Copyright (C) 2022 Renesas Electronics Corp.
 *
 * Based on Renesas R-Car VIN
 * Copyright (C) 2016 Renesas Electronics Corp.
 * Copyright (C) 2011-2013 Renesas Solutions Corp.
 * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
 * Copyright (C) 2008 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>

#include "rzg2l-cru.h"

/* HW CRU Registers Definition */

/* CRU Control Register */
#define CRUnCTRL 0x0
#define CRUnCTRL_VINSEL(x) ((x) << 0)

/* CRU Interrupt Enable Register */
#define CRUnIE 0x4
#define CRUnIE_EFE BIT(17)

/* CRU Interrupt Status Register */
#define CRUnINTS 0x8
#define CRUnINTS_SFS BIT(16)

/* CRU Reset Register */
#define CRUnRST 0xc
#define CRUnRST_VRESETN BIT(0)

/* Memory Bank Base Address (Lower) Register for CRU Image Data */
#define AMnMBxADDRL(x) (0x100 + ((x) * 8))

/* Memory Bank Base Address (Higher) Register for CRU Image Data */
#define AMnMBxADDRH(x) (0x104 + ((x) * 8))

/* Memory Bank Enable Register for CRU Image Data */
#define AMnMBVALID 0x148
#define AMnMBVALID_MBVALID(x) GENMASK(x, 0)

/* Memory Bank Status Register for CRU Image Data */
#define AMnMBS 0x14c
#define AMnMBS_MBSTS 0x7

/* AXI Master FIFO Pointer Register for CRU Image Data */
#define AMnFIFOPNTR 0x168
#define AMnFIFOPNTR_FIFOWPNTR GENMASK(7, 0)
#define AMnFIFOPNTR_FIFORPNTR_Y GENMASK(23, 16)

/* AXI Master Transfer Stop Register for CRU Image Data */
#define AMnAXISTP 0x174
#define AMnAXISTP_AXI_STOP BIT(0)

/* AXI Master Transfer Stop Status Register for CRU Image Data */
#define AMnAXISTPACK 0x178
#define AMnAXISTPACK_AXI_STOP_ACK BIT(0)

/* CRU Image Processing Enable Register */
#define ICnEN 0x200
#define ICnEN_ICEN BIT(0)

/* CRU Image Processing Main Control Register */
#define ICnMC 0x208
#define ICnMC_CSCTHR BIT(5)
#define ICnMC_INF_YUV8_422 (0x1e << 16)
#define ICnMC_INF_USER (0x30 << 16)
#define ICnMC_VCSEL(x) ((x) << 22)
#define ICnMC_INF_MASK GENMASK(21, 16)

/* CRU Module Status Register */
#define ICnMS 0x254
#define ICnMS_IA BIT(2)

/* CRU Data Output Mode Register */
#define ICnDMR 0x26c
#define ICnDMR_YCMODE_UYVY (1 << 4)

#define RZG2L_TIMEOUT_MS 100
#define RZG2L_RETRIES 10

#define RZG2L_CRU_DEFAULT_FORMAT V4L2_PIX_FMT_UYVY
#define RZG2L_CRU_DEFAULT_WIDTH RZG2L_CRU_MIN_INPUT_WIDTH
#define RZG2L_CRU_DEFAULT_HEIGHT RZG2L_CRU_MIN_INPUT_HEIGHT
#define RZG2L_CRU_DEFAULT_FIELD V4L2_FIELD_NONE
#define RZG2L_CRU_DEFAULT_COLORSPACE V4L2_COLORSPACE_SRGB

struct rzg2l_cru_buffer {
        struct vb2_v4l2_buffer vb;
        struct list_head list;
};

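/*
 * Map a vb2_v4l2_buffer back to the list head of its enclosing
 * rzg2l_cru_buffer, so it can be linked on the buffer queue.
 */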
#define to_buf_list(vb2_buffer) \
        (&container_of(vb2_buffer, struct rzg2l_cru_buffer, vb)->list)

/* -----------------------------------------------------------------------------
 * DMA operations
 */
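/* Read/write accessors for the CRU register block. */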
static void rzg2l_cru_write(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
{
        iowrite32(value, cru->base + offset);
}

static u32 rzg2l_cru_read(struct rzg2l_cru_dev *cru, u32 offset)
{
        return ioread32(cru->base + offset);
}

/* Must be called without qlock held; the lock is taken internally. */
static void return_unused_buffers(struct rzg2l_cru_dev *cru,
                                  enum vb2_buffer_state state)
{
        struct rzg2l_cru_buffer *buf, *node;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&cru->qlock, flags);
        for (i = 0; i < cru->num_buf; i++) {
                if (cru->queue_buf[i]) {
                        vb2_buffer_done(&cru->queue_buf[i]->vb2_buf,
                                        state);
                        cru->queue_buf[i] = NULL;
                }
        }

        list_for_each_entry_safe(buf, node, &cru->buf_list, list) {
                vb2_buffer_done(&buf->vb.vb2_buf, state);
                list_del(&buf->list);
        }
        spin_unlock_irqrestore(&cru->qlock, flags);
}

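/*
 * vb2 .queue_setup: capture uses a single plane of format.sizeimage bytes.
 * When the plane count is already set by the caller, only validate that the
 * provided size is large enough.
 */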
static int rzg2l_cru_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
                                 unsigned int *nplanes, unsigned int sizes[],
                                 struct device *alloc_devs[])
{
        struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);

        /* Make sure the image size is large enough. */
        if (*nplanes)
                return sizes[0] < cru->format.sizeimage ? -EINVAL : 0;

        *nplanes = 1;
        sizes[0] = cru->format.sizeimage;

        return 0;
}

static int rzg2l_cru_buffer_prepare(struct vb2_buffer *vb)
{
        struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue);
        unsigned long size = cru->format.sizeimage;

        if (vb2_plane_size(vb, 0) < size) {
                dev_err(cru->dev, "buffer too small (%lu < %lu)\n",
                        vb2_plane_size(vb, 0), size);
                return -EINVAL;
        }

        vb2_set_plane_payload(vb, 0, size);

        return 0;
}

static void rzg2l_cru_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue);
        unsigned long flags;

        spin_lock_irqsave(&cru->qlock, flags);

        list_add_tail(to_buf_list(vbuf), &cru->buf_list);

        spin_unlock_irqrestore(&cru->qlock, flags);
}

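/*
 * Validate that the connected subdevice's active source pad format matches
 * what the CRU is set up to capture: a supported media bus code, a known
 * field order, and the same frame dimensions.
 */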
static int rzg2l_cru_mc_validate_format(struct rzg2l_cru_dev *cru,
                                        struct v4l2_subdev *sd,
                                        struct media_pad *pad)
{
        struct v4l2_subdev_format fmt = {
                .which = V4L2_SUBDEV_FORMAT_ACTIVE,
        };

        fmt.pad = pad->index;
        if (v4l2_subdev_call_state_active(sd, pad, get_fmt, &fmt))
                return -EPIPE;

        switch (fmt.format.code) {
        case MEDIA_BUS_FMT_UYVY8_1X16:
                break;
        default:
                return -EPIPE;
        }

        switch (fmt.format.field) {
        case V4L2_FIELD_TOP:
        case V4L2_FIELD_BOTTOM:
        case V4L2_FIELD_NONE:
        case V4L2_FIELD_INTERLACED_TB:
        case V4L2_FIELD_INTERLACED_BT:
        case V4L2_FIELD_INTERLACED:
        case V4L2_FIELD_SEQ_TB:
        case V4L2_FIELD_SEQ_BT:
                break;
        default:
                return -EPIPE;
        }

        if (fmt.format.width != cru->format.width ||
            fmt.format.height != cru->format.height)
                return -EPIPE;

        return 0;
}

static void rzg2l_cru_set_slot_addr(struct rzg2l_cru_dev *cru,
                                    int slot, dma_addr_t addr)
{
        /*
         * The address needs to be 512 bytes aligned. The driver should never
         * accept settings that do not satisfy this in the first place...
         */
        if (WARN_ON((addr) & RZG2L_CRU_HW_BUFFER_MASK))
                return;

        /* Currently only 32-bit addresses are used, so clear the upper word */
        rzg2l_cru_write(cru, AMnMBxADDRL(slot), addr);
        rzg2l_cru_write(cru, AMnMBxADDRH(slot), 0);
}

/*
 * Move a buffer from the queue to the HW slot. If no buffer is available,
 * use the scratch buffer. The scratch buffer is never returned to userspace;
 * its only purpose is to keep the capture loop running.
 */
static void rzg2l_cru_fill_hw_slot(struct rzg2l_cru_dev *cru, int slot)
{
        struct vb2_v4l2_buffer *vbuf;
        struct rzg2l_cru_buffer *buf;
        dma_addr_t phys_addr;

        /* An already populated slot shall never be overwritten. */
        if (WARN_ON(cru->queue_buf[slot]))
                return;

        dev_dbg(cru->dev, "Filling HW slot: %d\n", slot);

        if (list_empty(&cru->buf_list)) {
                cru->queue_buf[slot] = NULL;
                phys_addr = cru->scratch_phys;
        } else {
                /* Keep track of the buffer we give to HW */
                buf = list_entry(cru->buf_list.next,
                                 struct rzg2l_cru_buffer, list);
                vbuf = &buf->vb;
                list_del_init(to_buf_list(vbuf));
                cru->queue_buf[slot] = vbuf;

                /* Setup DMA */
                phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
        }

        rzg2l_cru_set_slot_addr(cru, slot, phys_addr);
}

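/*
 * Enable the image data memory banks and preload every HW slot with a
 * buffer address before capture starts.
 */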
static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
{
        unsigned int slot;

        /*
         * Set image data memory banks.
         * Currently, all banks up to cru->num_buf are enabled.
         */
        rzg2l_cru_write(cru, AMnMBVALID, AMnMBVALID_MBVALID(cru->num_buf - 1));

        for (slot = 0; slot < cru->num_buf; slot++)
                rzg2l_cru_fill_hw_slot(cru, slot);
}

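/*
 * Program the ICnMC input interface bits for the CSI-2 media bus code and
 * select the CSI-2 virtual channel to capture from.
 */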
static void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru, bool *input_is_yuv,
                                 struct v4l2_mbus_framefmt *ip_sd_fmt)
{
        u32 icnmc;

        switch (ip_sd_fmt->code) {
        case MEDIA_BUS_FMT_UYVY8_1X16:
                icnmc = ICnMC_INF_YUV8_422;
                *input_is_yuv = true;
                break;
        default:
                *input_is_yuv = false;
                icnmc = ICnMC_INF_USER;
                break;
        }

        icnmc |= (rzg2l_cru_read(cru, ICnMC) & ~ICnMC_INF_MASK);

        /* Select the CSI-2 virtual channel */
        icnmc |= ICnMC_VCSEL(cru->csi.channel);

        rzg2l_cru_write(cru, ICnMC, icnmc);
}

static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
                                           struct v4l2_mbus_framefmt *ip_sd_fmt)
{
        bool output_is_yuv = false;
        bool input_is_yuv = false;
        u32 icndmr;

        rzg2l_cru_csi2_setup(cru, &input_is_yuv, ip_sd_fmt);

        /* Output format */
        switch (cru->format.pixelformat) {
        case V4L2_PIX_FMT_UYVY:
                icndmr = ICnDMR_YCMODE_UYVY;
                output_is_yuv = true;
                break;
        default:
                dev_err(cru->dev, "Invalid pixelformat (0x%x)\n",
                        cru->format.pixelformat);
                return -EINVAL;
        }

        /* If input and output use the same colorspace, enable bypass mode */
        if (output_is_yuv == input_is_yuv)
                rzg2l_cru_write(cru, ICnMC,
                                rzg2l_cru_read(cru, ICnMC) | ICnMC_CSCTHR);
        else
                rzg2l_cru_write(cru, ICnMC,
                                rzg2l_cru_read(cru, ICnMC) & (~ICnMC_CSCTHR));

        /* Set output data format */
        rzg2l_cru_write(cru, ICnDMR, icndmr);

        return 0;
}

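/*
 * Stop the image conversion block and quiesce the AXI master: disable and
 * clear interrupts, wait for ICnMS.IA to clear, wait for the FIFO read and
 * write pointers to match, request an AXI transfer stop, and finally hold
 * the CRU in reset.
 */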
void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
{
        u32 amnfifopntr, amnfifopntr_w, amnfifopntr_r_y;
        unsigned int retries = 0;
        unsigned long flags;
        u32 icnms;

        spin_lock_irqsave(&cru->qlock, flags);

        /* Disable and clear the interrupt */
        rzg2l_cru_write(cru, CRUnIE, 0);
        rzg2l_cru_write(cru, CRUnINTS, 0x001F0F0F);

        /* Stop the operation of image conversion */
        rzg2l_cru_write(cru, ICnEN, 0);

        /* Wait for streaming to stop */
        while ((rzg2l_cru_read(cru, ICnMS) & ICnMS_IA) && retries++ < RZG2L_RETRIES) {
                spin_unlock_irqrestore(&cru->qlock, flags);
                msleep(RZG2L_TIMEOUT_MS);
                spin_lock_irqsave(&cru->qlock, flags);
        }

        icnms = rzg2l_cru_read(cru, ICnMS) & ICnMS_IA;
        if (icnms)
                dev_err(cru->dev, "Failed to stop HW, something is seriously broken\n");

        cru->state = RZG2L_CRU_DMA_STOPPED;

        /* Wait until the FIFO becomes empty */
        for (retries = 5; retries > 0; retries--) {
                amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);

                amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR;
                amnfifopntr_r_y =
                        (amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16;
                if (amnfifopntr_w == amnfifopntr_r_y)
                        break;

                usleep_range(10, 20);
        }

        /* Report if the FIFO did not become empty */
        if (!retries)
                dev_err(cru->dev, "Failed to empty FIFO\n");

        /* Stop AXI bus */
        rzg2l_cru_write(cru, AMnAXISTP, AMnAXISTP_AXI_STOP);

        /* Wait until the AXI bus stops */
        for (retries = 5; retries > 0; retries--) {
                if (rzg2l_cru_read(cru, AMnAXISTPACK) &
                    AMnAXISTPACK_AXI_STOP_ACK)
                        break;

                usleep_range(10, 20);
        }

        /* Report if the AXI bus could not be stopped */
        if (!retries)
                dev_err(cru->dev, "Failed to stop AXI bus\n");

        /* Cancel the AXI bus stop request */
        rzg2l_cru_write(cru, AMnAXISTP, 0);

        /* Reset the CRU (AXI-master) */
        reset_control_assert(cru->aresetn);

        /* Reset the image processing module */
        rzg2l_cru_write(cru, CRUnRST, 0);

        spin_unlock_irqrestore(&cru->qlock, flags);
}

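/*
 * Bring the capture path up: select the video input, release the image
 * processing reset, arm the memory banks, configure the image conversion
 * block and enable the CRU interrupt before turning reception on.
 */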
int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
{
        struct v4l2_mbus_framefmt *fmt = rzg2l_cru_ip_get_src_fmt(cru);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cru->qlock, flags);

        /* Select a video input */
        rzg2l_cru_write(cru, CRUnCTRL, CRUnCTRL_VINSEL(0));

        /* Cancel the software reset for image processing block */
        rzg2l_cru_write(cru, CRUnRST, CRUnRST_VRESETN);

        /* Disable and clear the interrupt before using */
        rzg2l_cru_write(cru, CRUnIE, 0);
        rzg2l_cru_write(cru, CRUnINTS, 0x001f000f);

        /* Initialize the AXI master */
        rzg2l_cru_initialize_axi(cru);

        /* Initialize image convert */
        ret = rzg2l_cru_initialize_image_conv(cru, fmt);
        if (ret) {
                spin_unlock_irqrestore(&cru->qlock, flags);
                return ret;
        }

        /* Enable interrupt */
        rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE);

        /* Enable image processing reception */
        rzg2l_cru_write(cru, ICnEN, ICnEN_ICEN);

        spin_unlock_irqrestore(&cru->qlock, flags);

        return 0;
}

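/*
 * Start or stop streaming on the remote subdevice connected to the CRU and
 * keep the media pipeline state in sync.
 */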
static int rzg2l_cru_set_stream(struct rzg2l_cru_dev *cru, int on)
{
        struct media_pipeline *pipe;
        struct v4l2_subdev *sd;
        struct media_pad *pad;
        int ret;

        pad = media_pad_remote_pad_first(&cru->pad);
        if (!pad)
                return -EPIPE;

        sd = media_entity_to_v4l2_subdev(pad->entity);

        if (!on) {
                int stream_off_ret = 0;

                ret = v4l2_subdev_call(sd, video, s_stream, 0);
                if (ret)
                        stream_off_ret = ret;

                ret = v4l2_subdev_call(sd, video, post_streamoff);
                if (ret == -ENOIOCTLCMD)
                        ret = 0;
                if (ret && !stream_off_ret)
                        stream_off_ret = ret;

                video_device_pipeline_stop(&cru->vdev);

                return stream_off_ret;
        }

        ret = rzg2l_cru_mc_validate_format(cru, sd, pad);
        if (ret)
                return ret;

        pipe = media_entity_pipeline(&sd->entity) ? : &cru->vdev.pipe;
        ret = video_device_pipeline_start(&cru->vdev, pipe);
        if (ret)
                return ret;

        ret = v4l2_subdev_call(sd, video, pre_streamon, 0);
        if (ret && ret != -ENOIOCTLCMD)
                goto pipe_line_stop;

        ret = v4l2_subdev_call(sd, video, s_stream, 1);
        if (ret && ret != -ENOIOCTLCMD)
                goto err_s_stream;

        return 0;

err_s_stream:
        v4l2_subdev_call(sd, video, post_streamoff);

pipe_line_stop:
        video_device_pipeline_stop(&cru->vdev);

        return ret;
}

static void rzg2l_cru_stop_streaming(struct rzg2l_cru_dev *cru)
{
        cru->state = RZG2L_CRU_DMA_STOPPING;

        rzg2l_cru_set_stream(cru, 0);
}

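/*
 * Frame completion interrupt: acknowledge the status bits, work out which
 * memory bank just finished, hand the corresponding buffer back to vb2
 * (or drop the frame if the scratch buffer was in use) and refill the slot.
 */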
static irqreturn_t rzg2l_cru_irq(int irq, void *data)
{
        struct rzg2l_cru_dev *cru = data;
        unsigned int handled = 0;
        unsigned long flags;
        u32 irq_status;
        u32 amnmbs;
        int slot;

        spin_lock_irqsave(&cru->qlock, flags);

        irq_status = rzg2l_cru_read(cru, CRUnINTS);
        if (!irq_status)
                goto done;

        handled = 1;

        rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS));

        /* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */
        if (cru->state == RZG2L_CRU_DMA_STOPPED) {
                dev_dbg(cru->dev, "IRQ while state stopped\n");
                goto done;
        }

        /* A stop is in progress; the interrupt is expected, just log it */
        if (cru->state == RZG2L_CRU_DMA_STOPPING) {
                if (irq_status & CRUnINTS_SFS)
                        dev_dbg(cru->dev, "IRQ while state stopping\n");
                goto done;
        }

        /* Prepare for capture and update state */
        amnmbs = rzg2l_cru_read(cru, AMnMBS);
        slot = amnmbs & AMnMBS_MBSTS;

        /*
         * AMnMBS.MBSTS indicates the destination Memory Bank (MB).
         * Step back one bank to get the MB whose transfer has just completed.
         */
        if (slot == 0)
                slot = cru->num_buf - 1;
        else
                slot--;

        /*
         * To hand buffers back to userspace in a known order, start
         * capturing from slot 0.
         */
        if (cru->state == RZG2L_CRU_DMA_STARTING) {
                if (slot != 0) {
                        dev_dbg(cru->dev, "Starting sync slot: %d\n", slot);
                        goto done;
                }

                dev_dbg(cru->dev, "Capture start synced!\n");
                cru->state = RZG2L_CRU_DMA_RUNNING;
        }

        /* Capture frame */
        if (cru->queue_buf[slot]) {
                cru->queue_buf[slot]->field = cru->format.field;
                cru->queue_buf[slot]->sequence = cru->sequence;
                cru->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
                vb2_buffer_done(&cru->queue_buf[slot]->vb2_buf,
                                VB2_BUF_STATE_DONE);
                cru->queue_buf[slot] = NULL;
        } else {
                /* Scratch buffer was used, dropping frame. */
                dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence);
        }

        cru->sequence++;

        /* Prepare for next frame */
        rzg2l_cru_fill_hw_slot(cru, slot);

done:
        spin_unlock_irqrestore(&cru->qlock, flags);

        return IRQ_RETVAL(handled);
}

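/*
 * vb2 .start_streaming: power up the CRU, take it out of reset, request the
 * image conversion interrupt and allocate the scratch buffer before starting
 * the subdevice pipeline.
 */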
static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count)
{
        struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
        int ret;

        ret = pm_runtime_resume_and_get(cru->dev);
        if (ret)
                return ret;

        ret = clk_prepare_enable(cru->vclk);
        if (ret)
                goto err_pm_put;

        /* Release reset state */
        ret = reset_control_deassert(cru->aresetn);
        if (ret) {
                dev_err(cru->dev, "failed to deassert aresetn\n");
                goto err_vclk_disable;
        }

        ret = reset_control_deassert(cru->presetn);
        if (ret) {
                reset_control_assert(cru->aresetn);
                dev_err(cru->dev, "failed to deassert presetn\n");
                goto assert_aresetn;
        }

        ret = request_irq(cru->image_conv_irq, rzg2l_cru_irq,
                          IRQF_SHARED, KBUILD_MODNAME, cru);
        if (ret) {
                dev_err(cru->dev, "failed to request irq\n");
                goto assert_presetn;
        }

        /* Allocate scratch buffer. */
        cru->scratch = dma_alloc_coherent(cru->dev, cru->format.sizeimage,
                                          &cru->scratch_phys, GFP_KERNEL);
        if (!cru->scratch) {
                return_unused_buffers(cru, VB2_BUF_STATE_QUEUED);
                dev_err(cru->dev, "Failed to allocate scratch buffer\n");
                ret = -ENOMEM;
                goto free_image_conv_irq;
        }

        cru->sequence = 0;

        ret = rzg2l_cru_set_stream(cru, 1);
        if (ret) {
                return_unused_buffers(cru, VB2_BUF_STATE_QUEUED);
                goto out;
        }

        cru->state = RZG2L_CRU_DMA_STARTING;
        dev_dbg(cru->dev, "Starting to capture\n");
        return 0;

out:
        if (ret)
                dma_free_coherent(cru->dev, cru->format.sizeimage, cru->scratch,
                                  cru->scratch_phys);
free_image_conv_irq:
        free_irq(cru->image_conv_irq, cru);

assert_presetn:
        reset_control_assert(cru->presetn);

assert_aresetn:
        reset_control_assert(cru->aresetn);

err_vclk_disable:
        clk_disable_unprepare(cru->vclk);

err_pm_put:
        pm_runtime_put_sync(cru->dev);

        return ret;
}

static void rzg2l_cru_stop_streaming_vq(struct vb2_queue *vq)
{
        struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);

        rzg2l_cru_stop_streaming(cru);

        /* Free scratch buffer */
        dma_free_coherent(cru->dev, cru->format.sizeimage,
                          cru->scratch, cru->scratch_phys);

        free_irq(cru->image_conv_irq, cru);
        return_unused_buffers(cru, VB2_BUF_STATE_ERROR);

        reset_control_assert(cru->presetn);
        clk_disable_unprepare(cru->vclk);
        pm_runtime_put_sync(cru->dev);
}

static const struct vb2_ops rzg2l_cru_qops = {
        .queue_setup = rzg2l_cru_queue_setup,
        .buf_prepare = rzg2l_cru_buffer_prepare,
        .buf_queue = rzg2l_cru_buffer_queue,
        .start_streaming = rzg2l_cru_start_streaming_vq,
        .stop_streaming = rzg2l_cru_stop_streaming_vq,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
};

void rzg2l_cru_dma_unregister(struct rzg2l_cru_dev *cru)
{
        mutex_destroy(&cru->lock);

        v4l2_device_unregister(&cru->v4l2_dev);
        vb2_queue_release(&cru->queue);
}

int rzg2l_cru_dma_register(struct rzg2l_cru_dev *cru)
{
        struct vb2_queue *q = &cru->queue;
        unsigned int i;
        int ret;

        /* Initialize the top-level structure */
        ret = v4l2_device_register(cru->dev, &cru->v4l2_dev);
        if (ret)
                return ret;

        mutex_init(&cru->lock);
        INIT_LIST_HEAD(&cru->buf_list);

        spin_lock_init(&cru->qlock);

        cru->state = RZG2L_CRU_DMA_STOPPED;

        for (i = 0; i < RZG2L_CRU_HW_BUFFER_MAX; i++)
                cru->queue_buf[i] = NULL;

        /* buffer queue */
        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        q->io_modes = VB2_MMAP | VB2_DMABUF;
        q->lock = &cru->lock;
        q->drv_priv = cru;
        q->buf_struct_size = sizeof(struct rzg2l_cru_buffer);
        q->ops = &rzg2l_cru_qops;
        q->mem_ops = &vb2_dma_contig_memops;
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        q->min_queued_buffers = 4;
        q->dev = cru->dev;

        ret = vb2_queue_init(q);
        if (ret < 0) {
                dev_err(cru->dev, "failed to initialize VB2 queue\n");
                goto error;
        }

        return 0;

error:
        mutex_destroy(&cru->lock);
        v4l2_device_unregister(&cru->v4l2_dev);
        return ret;
}

/* -----------------------------------------------------------------------------
 * V4L2 stuff
 */

static const struct v4l2_format_info rzg2l_cru_formats[] = {
        {
                .format = V4L2_PIX_FMT_UYVY,
                .bpp[0] = 2,
        },
};

const struct v4l2_format_info *rzg2l_cru_format_from_pixel(u32 format)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rzg2l_cru_formats); i++)
                if (rzg2l_cru_formats[i].format == format)
                        return rzg2l_cru_formats + i;

        return NULL;
}

static u32 rzg2l_cru_format_bytesperline(struct v4l2_pix_format *pix)
{
        const struct v4l2_format_info *fmt;

        fmt = rzg2l_cru_format_from_pixel(pix->pixelformat);

        if (WARN_ON(!fmt))
                return -EINVAL;

        return pix->width * fmt->bpp[0];
}

static u32 rzg2l_cru_format_sizeimage(struct v4l2_pix_format *pix)
{
        return pix->bytesperline * pix->height;
}

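/*
 * Clamp the requested pixel format, field and frame size to what the CRU
 * can capture, then derive bytesperline and sizeimage from the result.
 */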
static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru,
                                   struct v4l2_pix_format *pix)
{
        if (!rzg2l_cru_format_from_pixel(pix->pixelformat))
                pix->pixelformat = RZG2L_CRU_DEFAULT_FORMAT;

        switch (pix->field) {
        case V4L2_FIELD_TOP:
        case V4L2_FIELD_BOTTOM:
        case V4L2_FIELD_NONE:
        case V4L2_FIELD_INTERLACED_TB:
        case V4L2_FIELD_INTERLACED_BT:
        case V4L2_FIELD_INTERLACED:
                break;
        default:
                pix->field = RZG2L_CRU_DEFAULT_FIELD;
                break;
        }

        /* Limit to CRU capabilities */
        v4l_bound_align_image(&pix->width, 320, RZG2L_CRU_MAX_INPUT_WIDTH, 1,
                              &pix->height, 240, RZG2L_CRU_MAX_INPUT_HEIGHT, 2, 0);

        pix->bytesperline = rzg2l_cru_format_bytesperline(pix);
        pix->sizeimage = rzg2l_cru_format_sizeimage(pix);

        dev_dbg(cru->dev, "Format %ux%u bpl: %u size: %u\n",
                pix->width, pix->height, pix->bytesperline, pix->sizeimage);
}

static void rzg2l_cru_try_format(struct rzg2l_cru_dev *cru,
                                 struct v4l2_pix_format *pix)
{
        /*
         * The V4L2 specification clearly documents the colorspace fields
         * as being set by drivers for capture devices. Using the values
         * supplied by userspace thus wouldn't comply with the API. Until
         * the API is updated force fixed values.
         */
        pix->colorspace = RZG2L_CRU_DEFAULT_COLORSPACE;
        pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
        pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
        pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
                                                          pix->ycbcr_enc);

        rzg2l_cru_format_align(cru, pix);
}

860
rzg2l_cru_querycap(struct file * file,void * priv,struct v4l2_capability * cap)861 static int rzg2l_cru_querycap(struct file *file, void *priv,
862 struct v4l2_capability *cap)
863 {
864 strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
865 strscpy(cap->card, "RZG2L_CRU", sizeof(cap->card));
866
867 return 0;
868 }
869
rzg2l_cru_try_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)870 static int rzg2l_cru_try_fmt_vid_cap(struct file *file, void *priv,
871 struct v4l2_format *f)
872 {
873 struct rzg2l_cru_dev *cru = video_drvdata(file);
874
875 rzg2l_cru_try_format(cru, &f->fmt.pix);
876
877 return 0;
878 }
879
rzg2l_cru_s_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)880 static int rzg2l_cru_s_fmt_vid_cap(struct file *file, void *priv,
881 struct v4l2_format *f)
882 {
883 struct rzg2l_cru_dev *cru = video_drvdata(file);
884
885 if (vb2_is_busy(&cru->queue))
886 return -EBUSY;
887
888 rzg2l_cru_try_format(cru, &f->fmt.pix);
889
890 cru->format = f->fmt.pix;
891
892 return 0;
893 }
894
rzg2l_cru_g_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)895 static int rzg2l_cru_g_fmt_vid_cap(struct file *file, void *priv,
896 struct v4l2_format *f)
897 {
898 struct rzg2l_cru_dev *cru = video_drvdata(file);
899
900 f->fmt.pix = cru->format;
901
902 return 0;
903 }
904
rzg2l_cru_enum_fmt_vid_cap(struct file * file,void * priv,struct v4l2_fmtdesc * f)905 static int rzg2l_cru_enum_fmt_vid_cap(struct file *file, void *priv,
906 struct v4l2_fmtdesc *f)
907 {
908 if (f->index >= ARRAY_SIZE(rzg2l_cru_formats))
909 return -EINVAL;
910
911 f->pixelformat = rzg2l_cru_formats[f->index].format;
912
913 return 0;
914 }
915
916 static const struct v4l2_ioctl_ops rzg2l_cru_ioctl_ops = {
917 .vidioc_querycap = rzg2l_cru_querycap,
918 .vidioc_try_fmt_vid_cap = rzg2l_cru_try_fmt_vid_cap,
919 .vidioc_g_fmt_vid_cap = rzg2l_cru_g_fmt_vid_cap,
920 .vidioc_s_fmt_vid_cap = rzg2l_cru_s_fmt_vid_cap,
921 .vidioc_enum_fmt_vid_cap = rzg2l_cru_enum_fmt_vid_cap,
922
923 .vidioc_reqbufs = vb2_ioctl_reqbufs,
924 .vidioc_create_bufs = vb2_ioctl_create_bufs,
925 .vidioc_querybuf = vb2_ioctl_querybuf,
926 .vidioc_qbuf = vb2_ioctl_qbuf,
927 .vidioc_dqbuf = vb2_ioctl_dqbuf,
928 .vidioc_expbuf = vb2_ioctl_expbuf,
929 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
930 .vidioc_streamon = vb2_ioctl_streamon,
931 .vidioc_streamoff = vb2_ioctl_streamoff,
932 };
933
934 /* -----------------------------------------------------------------------------
935 * Media controller file operations
936 */
937
static int rzg2l_cru_open(struct file *file)
{
        struct rzg2l_cru_dev *cru = video_drvdata(file);
        int ret;

        ret = mutex_lock_interruptible(&cru->lock);
        if (ret)
                return ret;

        file->private_data = cru;
        ret = v4l2_fh_open(file);
        if (ret)
                goto err_unlock;

        mutex_unlock(&cru->lock);

        return 0;

err_unlock:
        mutex_unlock(&cru->lock);

        return ret;
}

static int rzg2l_cru_release(struct file *file)
{
        struct rzg2l_cru_dev *cru = video_drvdata(file);
        int ret;

        mutex_lock(&cru->lock);

        /* The release helper will clean up any ongoing streaming. */
        ret = _vb2_fop_release(file, NULL);

        mutex_unlock(&cru->lock);

        return ret;
}

976
977 static const struct v4l2_file_operations rzg2l_cru_fops = {
978 .owner = THIS_MODULE,
979 .unlocked_ioctl = video_ioctl2,
980 .open = rzg2l_cru_open,
981 .release = rzg2l_cru_release,
982 .poll = vb2_fop_poll,
983 .mmap = vb2_fop_mmap,
984 .read = vb2_fop_read,
985 };
986
rzg2l_cru_v4l2_init(struct rzg2l_cru_dev * cru)987 static void rzg2l_cru_v4l2_init(struct rzg2l_cru_dev *cru)
988 {
989 struct video_device *vdev = &cru->vdev;
990
991 vdev->v4l2_dev = &cru->v4l2_dev;
992 vdev->queue = &cru->queue;
993 snprintf(vdev->name, sizeof(vdev->name), "CRU output");
994 vdev->release = video_device_release_empty;
995 vdev->lock = &cru->lock;
996 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
997 vdev->device_caps |= V4L2_CAP_IO_MC;
998 vdev->fops = &rzg2l_cru_fops;
999 vdev->ioctl_ops = &rzg2l_cru_ioctl_ops;
1000
1001 /* Set a default format */
1002 cru->format.pixelformat = RZG2L_CRU_DEFAULT_FORMAT;
1003 cru->format.width = RZG2L_CRU_DEFAULT_WIDTH;
1004 cru->format.height = RZG2L_CRU_DEFAULT_HEIGHT;
1005 cru->format.field = RZG2L_CRU_DEFAULT_FIELD;
1006 cru->format.colorspace = RZG2L_CRU_DEFAULT_COLORSPACE;
1007 rzg2l_cru_format_align(cru, &cru->format);
1008 }
1009
rzg2l_cru_video_unregister(struct rzg2l_cru_dev * cru)1010 void rzg2l_cru_video_unregister(struct rzg2l_cru_dev *cru)
1011 {
1012 media_device_unregister(&cru->mdev);
1013 video_unregister_device(&cru->vdev);
1014 }
1015
rzg2l_cru_video_register(struct rzg2l_cru_dev * cru)1016 int rzg2l_cru_video_register(struct rzg2l_cru_dev *cru)
1017 {
1018 struct video_device *vdev = &cru->vdev;
1019 int ret;
1020
1021 if (video_is_registered(&cru->vdev)) {
1022 struct media_entity *entity;
1023
1024 entity = &cru->vdev.entity;
1025 if (!entity->graph_obj.mdev)
1026 entity->graph_obj.mdev = &cru->mdev;
1027 return 0;
1028 }
1029
1030 rzg2l_cru_v4l2_init(cru);
1031 video_set_drvdata(vdev, cru);
1032 ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1033 if (ret) {
1034 dev_err(cru->dev, "Failed to register video device\n");
1035 return ret;
1036 }
1037
1038 ret = media_device_register(&cru->mdev);
1039 if (ret) {
1040 video_unregister_device(&cru->vdev);
1041 return ret;
1042 }
1043
1044 return 0;
1045 }
1046