1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2018 Intel Corporation
3 
4 #include <linux/module.h>
5 #include <linux/pm_runtime.h>
6 
7 #include <media/v4l2-event.h>
8 #include <media/v4l2-ioctl.h>
9 
10 #include "ipu3.h"
11 #include "ipu3-dmamap.h"
12 
13 /******************** v4l2_subdev_ops ********************/
14 
15 #define IPU3_RUNNING_MODE_VIDEO		0
16 #define IPU3_RUNNING_MODE_STILL		1
17 
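/*
 * Initialize the TRY formats of all pads and the TRY crop/compose rectangles
 * of the input pad when the subdev node is opened, using the current input
 * video node format as the default size.
 */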
18 static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
19 {
20 	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
21 							struct imgu_v4l2_subdev,
22 							subdev);
23 	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
24 	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[imgu_sd->pipe];
25 	struct v4l2_rect try_crop = {
26 		.top = 0,
27 		.left = 0,
28 	};
29 	unsigned int i;
30 
31 	try_crop.width =
32 		imgu_pipe->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.width;
33 	try_crop.height =
34 		imgu_pipe->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.height;
35 
36 	/* Initialize try_fmt */
37 	for (i = 0; i < IMGU_NODE_NUM; i++) {
38 		struct v4l2_mbus_framefmt *try_fmt =
39 			v4l2_subdev_state_get_format(fh->state, i);
40 
41 		try_fmt->width = try_crop.width;
42 		try_fmt->height = try_crop.height;
43 		try_fmt->code = imgu_pipe->nodes[i].pad_fmt.code;
44 		try_fmt->field = V4L2_FIELD_NONE;
45 	}
46 
47 	*v4l2_subdev_state_get_crop(fh->state, IMGU_NODE_IN) = try_crop;
48 	*v4l2_subdev_state_get_compose(fh->state, IMGU_NODE_IN) = try_crop;
49 
50 	return 0;
51 }
52 
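/*
 * Enable or disable streaming on the ImgU subdev. On enable, propagate the
 * per-node formats and the effective/BDS/GDC rectangles to the CSS and pick
 * the video or still pipe according to the "IPU3 Pipe Mode" control.
 */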
53 static int imgu_subdev_s_stream(struct v4l2_subdev *sd, int enable)
54 {
55 	int i;
56 	unsigned int node;
57 	int r = 0;
58 	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
59 	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
60 							struct imgu_v4l2_subdev,
61 							subdev);
62 	unsigned int pipe = imgu_sd->pipe;
63 	struct device *dev = &imgu->pci_dev->dev;
64 	struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
65 	struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
66 	struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
67 	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
68 
69 	dev_dbg(dev, "%s %d for pipe %u", __func__, enable, pipe);
70 	/* Grab the pipe mode control while streaming, release it on stream off */
71 	v4l2_ctrl_grab(imgu_sd->ctrl, enable);
72 
73 	if (!enable) {
74 		imgu_sd->active = false;
75 		return 0;
76 	}
77 
78 	for (i = 0; i < IMGU_NODE_NUM; i++)
79 		imgu_pipe->queue_enabled[i] = imgu_pipe->nodes[i].enabled;
80 
81 	/* This is handled specially */
82 	imgu_pipe->queue_enabled[IPU3_CSS_QUEUE_PARAMS] = false;
83 
84 	/* Initialize CSS formats */
85 	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
86 		node = imgu_map_node(imgu, i);
87 		/* No need to reconfig meta nodes */
88 		if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
89 			continue;
90 		fmts[i] = imgu_pipe->queue_enabled[node] ?
91 			&imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp : NULL;
92 	}
93 
94 	/* Enable VF output only when VF queue requested by user */
95 	css_pipe->vf_output_en = false;
96 	if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
97 		css_pipe->vf_output_en = true;
98 
99 	if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
100 		css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
101 	else
102 		css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
103 
104 	dev_dbg(dev, "IPU3 pipe %u pipe_id %u", pipe, css_pipe->pipe_id);
105 
106 	rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
107 	rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
108 	rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
109 
110 	r = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
111 	if (r) {
112 		dev_err(dev, "failed to set initial formats pipe %u with (%d)",
113 			pipe, r);
114 		return r;
115 	}
116 
117 	imgu_sd->active = true;
118 
119 	return 0;
120 }
121 
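/* Return the active pad format, or the TRY format stored in the subdev state. */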
122 static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
123 			       struct v4l2_subdev_state *sd_state,
124 			       struct v4l2_subdev_format *fmt)
125 {
126 	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
127 	struct v4l2_mbus_framefmt *mf;
128 	struct imgu_media_pipe *imgu_pipe;
129 	u32 pad = fmt->pad;
130 	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
131 							struct imgu_v4l2_subdev,
132 							subdev);
133 	unsigned int pipe = imgu_sd->pipe;
134 
135 	imgu_pipe = &imgu->imgu_pipe[pipe];
136 	if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
137 		fmt->format = imgu_pipe->nodes[pad].pad_fmt;
138 	} else {
139 		mf = v4l2_subdev_state_get_format(sd_state, pad);
140 		fmt->format = *mf;
141 	}
142 
143 	return 0;
144 }
145 
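/*
 * Set the pad format, clamping the requested size to the hardware limits of
 * the input (sink) or output (source) pads.
 */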
146 static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
147 			       struct v4l2_subdev_state *sd_state,
148 			       struct v4l2_subdev_format *fmt)
149 {
150 	struct imgu_media_pipe *imgu_pipe;
151 	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
152 	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
153 							struct imgu_v4l2_subdev,
154 							subdev);
155 	struct v4l2_mbus_framefmt *mf;
156 	u32 pad = fmt->pad;
157 	unsigned int pipe = imgu_sd->pipe;
158 
159 	dev_dbg(&imgu->pci_dev->dev, "set subdev %u pad %u fmt to [%ux%u]",
160 		pipe, pad, fmt->format.width, fmt->format.height);
161 
162 	imgu_pipe = &imgu->imgu_pipe[pipe];
163 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
164 		mf = v4l2_subdev_state_get_format(sd_state, pad);
165 	else
166 		mf = &imgu_pipe->nodes[pad].pad_fmt;
167 
168 	fmt->format.code = mf->code;
169 	/* Clamp the w and h based on the hardware capabilities */
170 	if (imgu_sd->subdev_pads[pad].flags & MEDIA_PAD_FL_SOURCE) {
171 		fmt->format.width = clamp(fmt->format.width,
172 					  IPU3_OUTPUT_MIN_WIDTH,
173 					  IPU3_OUTPUT_MAX_WIDTH);
174 		fmt->format.height = clamp(fmt->format.height,
175 					   IPU3_OUTPUT_MIN_HEIGHT,
176 					   IPU3_OUTPUT_MAX_HEIGHT);
177 	} else {
178 		fmt->format.width = clamp(fmt->format.width,
179 					  IPU3_INPUT_MIN_WIDTH,
180 					  IPU3_INPUT_MAX_WIDTH);
181 		fmt->format.height = clamp(fmt->format.height,
182 					   IPU3_INPUT_MIN_HEIGHT,
183 					   IPU3_INPUT_MAX_HEIGHT);
184 	}
185 
186 	*mf = fmt->format;
187 
188 	return 0;
189 }
190 
191 static struct v4l2_rect *
192 imgu_subdev_get_crop(struct imgu_v4l2_subdev *sd,
193 		     struct v4l2_subdev_state *sd_state, unsigned int pad,
194 		     enum v4l2_subdev_format_whence which)
195 {
196 	if (which == V4L2_SUBDEV_FORMAT_TRY)
197 		return v4l2_subdev_state_get_crop(sd_state, pad);
198 	else
199 		return &sd->rect.eff;
200 }
201 
202 static struct v4l2_rect *
203 imgu_subdev_get_compose(struct imgu_v4l2_subdev *sd,
204 			struct v4l2_subdev_state *sd_state, unsigned int pad,
205 			enum v4l2_subdev_format_whence which)
206 {
207 	if (which == V4L2_SUBDEV_FORMAT_TRY)
208 		return v4l2_subdev_state_get_compose(sd_state, pad);
209 	else
210 		return &sd->rect.bds;
211 }
212 
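/* Only the crop and compose targets of the input pad are supported. */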
213 static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
214 				     struct v4l2_subdev_state *sd_state,
215 				     struct v4l2_subdev_selection *sel)
216 {
217 	struct imgu_v4l2_subdev *imgu_sd =
218 		container_of(sd, struct imgu_v4l2_subdev, subdev);
219 
220 	if (sel->pad != IMGU_NODE_IN)
221 		return -EINVAL;
222 
223 	switch (sel->target) {
224 	case V4L2_SEL_TGT_CROP:
225 		sel->r = *imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad,
226 					       sel->which);
227 		return 0;
228 	case V4L2_SEL_TGT_COMPOSE:
229 		sel->r = *imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad,
230 						  sel->which);
231 		return 0;
232 	default:
233 		return -EINVAL;
234 	}
235 }
236 
237 static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
238 				     struct v4l2_subdev_state *sd_state,
239 				     struct v4l2_subdev_selection *sel)
240 {
241 	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
242 	struct imgu_v4l2_subdev *imgu_sd =
243 		container_of(sd, struct imgu_v4l2_subdev, subdev);
244 	struct v4l2_rect *rect;
245 
246 	dev_dbg(&imgu->pci_dev->dev,
247 		 "set subdev %u sel which %u target 0x%4x rect [%ux%u]",
248 		 imgu_sd->pipe, sel->which, sel->target,
249 		 sel->r.width, sel->r.height);
250 
251 	if (sel->pad != IMGU_NODE_IN)
252 		return -EINVAL;
253 
254 	switch (sel->target) {
255 	case V4L2_SEL_TGT_CROP:
256 		rect = imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad,
257 					    sel->which);
258 		break;
259 	case V4L2_SEL_TGT_COMPOSE:
260 		rect = imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad,
261 					       sel->which);
262 		break;
263 	default:
264 		return -EINVAL;
265 	}
266 
267 	*rect = sel->r;
268 	return 0;
269 }
270 
271 /******************** media_entity_operations ********************/
272 
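/*
 * Track the enable state of the video node behind each link; the pipe itself
 * is marked enabled once the link to its input node is enabled.
 */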
273 static int imgu_link_setup(struct media_entity *entity,
274 			   const struct media_pad *local,
275 			   const struct media_pad *remote, u32 flags)
276 {
277 	struct imgu_media_pipe *imgu_pipe;
278 	struct v4l2_subdev *sd = container_of(entity, struct v4l2_subdev,
279 					      entity);
280 	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
281 	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
282 							struct imgu_v4l2_subdev,
283 							subdev);
284 	unsigned int pipe = imgu_sd->pipe;
285 	u32 pad = local->index;
286 
287 	WARN_ON(pad >= IMGU_NODE_NUM);
288 
289 	dev_dbg(&imgu->pci_dev->dev, "pipe %u pad %u is %s", pipe, pad,
290 		 flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
291 
292 	imgu_pipe = &imgu->imgu_pipe[pipe];
293 	imgu_pipe->nodes[pad].enabled = flags & MEDIA_LNK_FL_ENABLED;
294 
295 	/* enable input node to enable the pipe */
296 	if (pad != IMGU_NODE_IN)
297 		return 0;
298 
299 	if (flags & MEDIA_LNK_FL_ENABLED)
300 		__set_bit(pipe, imgu->css.enabled_pipes);
301 	else
302 		__clear_bit(pipe, imgu->css.enabled_pipes);
303 
304 	dev_dbg(&imgu->pci_dev->dev, "pipe %u is %s", pipe,
305 		 flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
306 
307 	return 0;
308 }
309 
310 /******************** vb2_ops ********************/
311 
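/* Map the buffer scatterlist into the ImgU DMA address space (not needed for parameters). */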
312 static int imgu_vb2_buf_init(struct vb2_buffer *vb)
313 {
314 	struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
315 	struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
316 	struct imgu_buffer *buf = container_of(vb,
317 		struct imgu_buffer, vid_buf.vbb.vb2_buf);
318 	struct imgu_video_device *node =
319 		container_of(vb->vb2_queue, struct imgu_video_device, vbq);
320 	unsigned int queue = imgu_node_to_queue(node->id);
321 
322 	if (queue == IPU3_CSS_QUEUE_PARAMS)
323 		return 0;
324 
325 	return imgu_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map);
326 }
327 
328 /* Called when each buffer is freed */
329 static void imgu_vb2_buf_cleanup(struct vb2_buffer *vb)
330 {
331 	struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
332 	struct imgu_buffer *buf = container_of(vb,
333 		struct imgu_buffer, vid_buf.vbb.vb2_buf);
334 	struct imgu_video_device *node =
335 		container_of(vb->vb2_queue, struct imgu_video_device, vbq);
336 	unsigned int queue = imgu_node_to_queue(node->id);
337 
338 	if (queue == IPU3_CSS_QUEUE_PARAMS)
339 		return;
340 
341 	imgu_dmamap_unmap(imgu, &buf->map);
342 }
343 
344 /* Transfer buffer ownership to the driver */
345 static void imgu_vb2_buf_queue(struct vb2_buffer *vb)
346 {
347 	struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
348 	struct imgu_video_device *node =
349 		container_of(vb->vb2_queue, struct imgu_video_device, vbq);
350 	unsigned int queue = imgu_node_to_queue(node->id);
351 	struct imgu_buffer *buf = container_of(vb, struct imgu_buffer,
352 					       vid_buf.vbb.vb2_buf);
353 	unsigned long need_bytes;
354 	unsigned long payload = vb2_get_plane_payload(vb, 0);
355 
356 	if (vb->vb2_queue->type == V4L2_BUF_TYPE_META_CAPTURE ||
357 	    vb->vb2_queue->type == V4L2_BUF_TYPE_META_OUTPUT)
358 		need_bytes = node->vdev_fmt.fmt.meta.buffersize;
359 	else
360 		need_bytes = node->vdev_fmt.fmt.pix_mp.plane_fmt[0].sizeimage;
361 
362 	if (queue == IPU3_CSS_QUEUE_PARAMS && payload && payload < need_bytes) {
363 		dev_err(&imgu->pci_dev->dev, "invalid data size for params.");
364 		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
365 		return;
366 	}
367 
368 	mutex_lock(&imgu->lock);
369 	if (queue != IPU3_CSS_QUEUE_PARAMS)
370 		imgu_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
371 
372 	list_add_tail(&buf->vid_buf.list, &node->buffers);
373 	mutex_unlock(&imgu->lock);
374 
375 	vb2_set_plane_payload(vb, 0, need_bytes);
376 
377 	mutex_lock(&imgu->streaming_lock);
378 	if (imgu->streaming)
379 		imgu_queue_buffers(imgu, false, node->pipe);
380 	mutex_unlock(&imgu->streaming_lock);
381 
382 	dev_dbg(&imgu->pci_dev->dev, "%s for pipe %u node %u", __func__,
383 		node->pipe, node->id);
384 }
385 
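/*
 * Negotiate the buffer count and the single plane size from the currently
 * configured video node format (image or metadata).
 */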
386 static int imgu_vb2_queue_setup(struct vb2_queue *vq,
387 				unsigned int *num_buffers,
388 				unsigned int *num_planes,
389 				unsigned int sizes[],
390 				struct device *alloc_devs[])
391 {
392 	struct imgu_device *imgu = vb2_get_drv_priv(vq);
393 	struct imgu_video_device *node =
394 		container_of(vq, struct imgu_video_device, vbq);
395 	const struct v4l2_format *fmt = &node->vdev_fmt;
396 	unsigned int size;
397 
398 	*num_buffers = clamp_val(*num_buffers, 1, VB2_MAX_FRAME);
399 	alloc_devs[0] = &imgu->pci_dev->dev;
400 
401 	if (vq->type == V4L2_BUF_TYPE_META_CAPTURE ||
402 	    vq->type == V4L2_BUF_TYPE_META_OUTPUT)
403 		size = fmt->fmt.meta.buffersize;
404 	else
405 		size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
406 
407 	if (*num_planes) {
408 		if (sizes[0] < size)
409 			return -EINVAL;
410 		size = sizes[0];
411 	}
412 
413 	*num_planes = 1;
414 	sizes[0] = size;
415 
416 	/* Initialize buffer queue */
417 	INIT_LIST_HEAD(&node->buffers);
418 
419 	return 0;
420 }
421 
422 /* Check if all enabled video nodes, except the given one, are streaming */
423 static bool imgu_all_nodes_streaming(struct imgu_device *imgu,
424 				     struct imgu_video_device *except)
425 {
426 	unsigned int i, pipe, p;
427 	struct imgu_video_device *node;
428 	struct device *dev = &imgu->pci_dev->dev;
429 
430 	pipe = except->pipe;
431 	if (!test_bit(pipe, imgu->css.enabled_pipes)) {
432 		dev_warn(&imgu->pci_dev->dev,
433 			 "pipe %u link is not ready yet", pipe);
434 		return false;
435 	}
436 
437 	for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
438 		for (i = 0; i < IMGU_NODE_NUM; i++) {
439 			node = &imgu->imgu_pipe[p].nodes[i];
440 			dev_dbg(dev, "%s pipe %u queue %u name %s enabled = %u",
441 				__func__, p, i, node->name, node->enabled);
442 			if (node == except)
443 				continue;
444 			if (node->enabled && !vb2_start_streaming_called(&node->vbq))
445 				return false;
446 		}
447 	}
448 
449 	return true;
450 }
451 
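/* Remove all queued buffers from the node and return them to vb2 in @state. */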
452 static void imgu_return_all_buffers(struct imgu_device *imgu,
453 				    struct imgu_video_device *node,
454 				    enum vb2_buffer_state state)
455 {
456 	struct imgu_vb2_buffer *b, *b0;
457 
458 	/* Return all buffers */
459 	mutex_lock(&imgu->lock);
460 	list_for_each_entry_safe(b, b0, &node->buffers, list) {
461 		list_del(&b->list);
462 		vb2_buffer_done(&b->vbb.vb2_buf, state);
463 	}
464 	mutex_unlock(&imgu->lock);
465 }
466 
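/*
 * Start streaming on a video node. The CSS is only started once every enabled
 * node of all enabled pipes has been started; until then this only joins the
 * media pipeline and returns.
 */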
467 static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
468 {
469 	struct imgu_media_pipe *imgu_pipe;
470 	struct imgu_device *imgu = vb2_get_drv_priv(vq);
471 	struct device *dev = &imgu->pci_dev->dev;
472 	struct imgu_video_device *node =
473 		container_of(vq, struct imgu_video_device, vbq);
474 	int r;
475 	unsigned int pipe;
476 
477 	dev_dbg(dev, "%s node name %s pipe %u id %u", __func__,
478 		node->name, node->pipe, node->id);
479 
480 	mutex_lock(&imgu->streaming_lock);
481 	if (imgu->streaming) {
482 		r = -EBUSY;
483 		mutex_unlock(&imgu->streaming_lock);
484 		goto fail_return_bufs;
485 	}
486 	mutex_unlock(&imgu->streaming_lock);
487 
488 	if (!node->enabled) {
489 		dev_err(dev, "IMGU node is not enabled");
490 		r = -EINVAL;
491 		goto fail_return_bufs;
492 	}
493 
494 	pipe = node->pipe;
495 	imgu_pipe = &imgu->imgu_pipe[pipe];
496 	atomic_set(&node->sequence, 0);
497 	r = video_device_pipeline_start(&node->vdev, &imgu_pipe->pipeline);
498 	if (r < 0)
499 		goto fail_return_bufs;
500 
501 	if (!imgu_all_nodes_streaming(imgu, node))
502 		return 0;
503 
504 	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
505 		r = v4l2_subdev_call(&imgu->imgu_pipe[pipe].imgu_sd.subdev,
506 				     video, s_stream, 1);
507 		if (r < 0)
508 			goto fail_stop_pipeline;
509 	}
510 
511 	/* Start streaming of the whole pipeline now */
512 	dev_dbg(dev, "IMGU streaming is ready to start");
513 	mutex_lock(&imgu->streaming_lock);
514 	r = imgu_s_stream(imgu, true);
515 	if (!r)
516 		imgu->streaming = true;
517 	mutex_unlock(&imgu->streaming_lock);
518 
519 	return 0;
520 
521 fail_stop_pipeline:
522 	video_device_pipeline_stop(&node->vdev);
523 fail_return_bufs:
524 	imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED);
525 
526 	return r;
527 }
528 
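/*
 * Stop streaming on a video node: the first stop of an active setup also
 * stops the CSS and the subdevs of all enabled pipes, then the node leaves
 * the media pipeline and its remaining buffers are returned as errors.
 */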
529 static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
530 {
531 	struct imgu_media_pipe *imgu_pipe;
532 	struct imgu_device *imgu = vb2_get_drv_priv(vq);
533 	struct device *dev = &imgu->pci_dev->dev;
534 	struct imgu_video_device *node =
535 		container_of(vq, struct imgu_video_device, vbq);
536 	int r;
537 	unsigned int pipe;
538 	bool stop_streaming = false;
539 
540 	/* Verify that the node has been set up with imgu_v4l2_node_setup() */
541 	WARN_ON(!node->enabled);
542 
543 	pipe = node->pipe;
544 	dev_dbg(dev, "Try to stream off node [%u][%u]", pipe, node->id);
545 
546 	/*
547 	 * When the first node of a streaming setup is stopped, the entire
548 	 * pipeline needs to stop before individual nodes are disabled.
549 	 * Perform the inverse of the initial setup.
550 	 *
551 	 * Part 1 - s_stream on the entire pipeline
552 	 */
553 	mutex_lock(&imgu->streaming_lock);
554 	if (imgu->streaming) {
555 		/* Yes, really stop streaming now */
556 		dev_dbg(dev, "IMGU streaming is ready to stop");
557 		r = imgu_s_stream(imgu, false);
558 		if (!r)
559 			imgu->streaming = false;
560 		stop_streaming = true;
561 	}
562 	mutex_unlock(&imgu->streaming_lock);
563 
564 	/* Part 2 - s_stream on subdevs
565 	 *
566 	 * If we call s_stream multiple times, Linux v6.7's call_s_stream()
567 	 * WARNs and aborts. Thus, disable all pipes at once, and only once.
568 	 */
569 	if (stop_streaming) {
570 		for_each_set_bit(pipe, imgu->css.enabled_pipes,
571 				 IMGU_MAX_PIPE_NUM) {
572 			imgu_pipe = &imgu->imgu_pipe[pipe];
573 
574 			r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev,
575 					     video, s_stream, 0);
576 			if (r)
577 				dev_err(&imgu->pci_dev->dev,
578 					"failed to stop subdev streaming\n");
579 		}
580 	}
581 
582 	/* Part 3 - individual node teardown */
583 	video_device_pipeline_stop(&node->vdev);
584 	imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
585 }
586 
587 /******************** v4l2_ioctl_ops ********************/
588 
589 #define VID_CAPTURE	0
590 #define VID_OUTPUT	1
591 #define DEF_VID_CAPTURE	0
592 #define DEF_VID_OUTPUT	1
593 
594 struct imgu_fmt {
595 	u32	fourcc;
596 	u16	type; /* VID_CAPTURE or VID_OUTPUT, not both */
597 };
598 
599 /* format descriptions for the capture and output queues */
600 static const struct imgu_fmt formats[] = {
601 	{ V4L2_PIX_FMT_NV12, VID_CAPTURE },
602 	{ V4L2_PIX_FMT_IPU3_SGRBG10, VID_OUTPUT },
603 	{ V4L2_PIX_FMT_IPU3_SBGGR10, VID_OUTPUT },
604 	{ V4L2_PIX_FMT_IPU3_SGBRG10, VID_OUTPUT },
605 	{ V4L2_PIX_FMT_IPU3_SRGGB10, VID_OUTPUT },
606 };
607 
608 /* Find the first matching format, return the default if not found */
609 static const struct imgu_fmt *find_format(struct v4l2_format *f, u32 type)
610 {
611 	unsigned int i;
612 
613 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
614 		if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
615 		    formats[i].type == type)
616 			return &formats[i];
617 	}
618 
619 	return type == VID_CAPTURE ? &formats[DEF_VID_CAPTURE] :
620 				     &formats[DEF_VID_OUTPUT];
621 }
622 
623 static int imgu_vidioc_querycap(struct file *file, void *fh,
624 				struct v4l2_capability *cap)
625 {
626 	struct imgu_device *imgu = video_drvdata(file);
627 
628 	strscpy(cap->driver, IMGU_NAME, sizeof(cap->driver));
629 	strscpy(cap->card, IMGU_NAME, sizeof(cap->card));
630 	snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
631 		 pci_name(imgu->pci_dev));
632 
633 	return 0;
634 }
635 
636 static int enum_fmts(struct v4l2_fmtdesc *f, u32 type)
637 {
638 	unsigned int i, j;
639 
640 	if (f->mbus_code != 0 && f->mbus_code != MEDIA_BUS_FMT_FIXED)
641 		return -EINVAL;
642 
643 	for (i = j = 0; i < ARRAY_SIZE(formats); ++i) {
644 		if (formats[i].type == type) {
645 			if (j == f->index)
646 				break;
647 			++j;
648 		}
649 	}
650 
651 	if (i < ARRAY_SIZE(formats)) {
652 		f->pixelformat = formats[i].fourcc;
653 		return 0;
654 	}
655 
656 	return -EINVAL;
657 }
658 
659 static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
660 				   struct v4l2_fmtdesc *f)
661 {
662 	if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
663 		return -EINVAL;
664 
665 	return enum_fmts(f, VID_CAPTURE);
666 }
667 
668 static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
669 				   struct v4l2_fmtdesc *f)
670 {
671 	if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
672 		return -EINVAL;
673 
674 	return enum_fmts(f, VID_OUTPUT);
675 }
676 
677 /* Always propagate the format from the CIO2 subdev forward */
678 static int imgu_vidioc_g_fmt(struct file *file, void *fh,
679 			     struct v4l2_format *f)
680 {
681 	struct imgu_video_device *node = file_to_intel_imgu_node(file);
682 
683 	f->fmt = node->vdev_fmt.fmt;
684 
685 	return 0;
686 }
687 
688 /*
689  * Set input/output format. Unless it is just a try, this also resets
690  * selections (i.e. effective and BDS resolutions) to defaults.
691  */
692 static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
693 		    struct v4l2_format *f, bool try)
694 {
695 	struct device *dev = &imgu->pci_dev->dev;
696 	struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
697 	struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
698 	struct v4l2_mbus_framefmt pad_fmt;
699 	unsigned int i, css_q;
700 	int ret;
701 	struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
702 	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
703 	struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
704 
705 	dev_dbg(dev, "set fmt node [%u][%u](try = %u)", pipe, node, try);
706 
707 	for (i = 0; i < IMGU_NODE_NUM; i++)
708 		dev_dbg(dev, "IMGU pipe %u node %u enabled = %u",
709 			pipe, i, imgu_pipe->nodes[i].enabled);
710 
711 	if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
712 		css_pipe->vf_output_en = true;
713 
714 	if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
715 		css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
716 	else
717 		css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
718 
719 	dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
720 
721 	css_q = imgu_node_to_queue(node);
722 	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
723 		unsigned int inode = imgu_map_node(imgu, i);
724 
725 		/* Skip the meta node */
726 		if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
727 			continue;
728 
729 		/* CSS expects some format on OUT queue */
730 		if (i != IPU3_CSS_QUEUE_OUT &&
731 		    !imgu_pipe->nodes[inode].enabled && !try) {
732 			fmts[i] = NULL;
733 			continue;
734 		}
735 
736 		if (i == css_q) {
737 			fmts[i] = &f->fmt.pix_mp;
738 			continue;
739 		}
740 
741 		if (try) {
742 			fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
743 					  sizeof(struct v4l2_pix_format_mplane),
744 					  GFP_KERNEL);
745 			if (!fmts[i]) {
746 				ret = -ENOMEM;
747 				goto out;
748 			}
749 		} else {
750 			fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
751 		}
752 
753 	}
754 
755 	if (!try) {
756 		/* Effective and BDS resolutions were set via the subdev selection API */
757 		struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
758 
759 		rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
760 		rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
761 		rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
762 
763 		/* Assume the pad format was already set via the subdev set_fmt */
764 		pad_fmt = imgu_pipe->nodes[IMGU_NODE_IN].pad_fmt;
765 		rects[IPU3_CSS_RECT_GDC]->width = pad_fmt.width;
766 		rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
767 	}
768 
769 	if (!fmts[css_q]) {
770 		ret = -EINVAL;
771 		goto out;
772 	}
773 
774 	if (try)
775 		ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
776 	else
777 		ret = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
778 
779 	/* ret is the binary number in the firmware blob */
780 	if (ret < 0)
781 		goto out;
782 
783 	/*
784 	 * imgu doesn't set the node to the value given by user
785 	 * before we return success from this function, so set it here.
786 	 */
787 	if (!try)
788 		imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
789 
790 out:
791 	if (try) {
792 		for (i = 0; i < IPU3_CSS_QUEUES; i++)
793 			if (i != css_q)
794 				kfree(fmts[i]);
795 	}
796 
797 	return ret;
798 }
799 
800 static int imgu_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
801 {
802 	struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
803 	const struct imgu_fmt *fmt;
804 
805 	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
806 		fmt = find_format(f, VID_CAPTURE);
807 	else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
808 		fmt = find_format(f, VID_OUTPUT);
809 	else
810 		return -EINVAL;
811 
812 	pixm->pixelformat = fmt->fourcc;
813 
814 	return 0;
815 }
816 
817 static int imgu_vidioc_try_fmt(struct file *file, void *fh,
818 			       struct v4l2_format *f)
819 {
820 	struct imgu_device *imgu = video_drvdata(file);
821 	struct device *dev = &imgu->pci_dev->dev;
822 	struct imgu_video_device *node = file_to_intel_imgu_node(file);
823 	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
824 	int r;
825 
826 	dev_dbg(dev, "%s [%ux%u] for node %u\n", __func__,
827 		pix_mp->width, pix_mp->height, node->id);
828 
829 	r = imgu_try_fmt(file, fh, f);
830 	if (r)
831 		return r;
832 
833 	return imgu_fmt(imgu, node->pipe, node->id, f, true);
834 }
835 
836 static int imgu_vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
837 {
838 	struct imgu_device *imgu = video_drvdata(file);
839 	struct device *dev = &imgu->pci_dev->dev;
840 	struct imgu_video_device *node = file_to_intel_imgu_node(file);
841 	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
842 	int r;
843 
844 	dev_dbg(dev, "%s [%ux%u] for node %u\n", __func__,
845 		pix_mp->width, pix_mp->height, node->id);
846 
847 	r = imgu_try_fmt(file, fh, f);
848 	if (r)
849 		return r;
850 
851 	return imgu_fmt(imgu, node->pipe, node->id, f, false);
852 }
853 
854 struct imgu_meta_fmt {
855 	__u32 fourcc;
856 	char *name;
857 };
858 
859 /* From drivers/media/v4l2-core/v4l2-ioctl.c */
860 static const struct imgu_meta_fmt meta_fmts[] = {
861 	{ V4L2_META_FMT_IPU3_PARAMS, "IPU3 processing parameters" },
862 	{ V4L2_META_FMT_IPU3_STAT_3A, "IPU3 3A statistics" },
863 };
864 
865 static int imgu_meta_enum_format(struct file *file, void *fh,
866 				 struct v4l2_fmtdesc *fmt)
867 {
868 	struct imgu_video_device *node = file_to_intel_imgu_node(file);
869 	unsigned int i = fmt->type == V4L2_BUF_TYPE_META_OUTPUT ? 0 : 1;
870 
871 	/* Each node is dedicated to only one meta format */
872 	if (fmt->index > 0 || fmt->type != node->vbq.type)
873 		return -EINVAL;
874 
875 	if (fmt->mbus_code != 0 && fmt->mbus_code != MEDIA_BUS_FMT_FIXED)
876 		return -EINVAL;
877 
878 	strscpy(fmt->description, meta_fmts[i].name, sizeof(fmt->description));
879 	fmt->pixelformat = meta_fmts[i].fourcc;
880 
881 	return 0;
882 }
883 
884 static int imgu_vidioc_g_meta_fmt(struct file *file, void *fh,
885 				  struct v4l2_format *f)
886 {
887 	struct imgu_video_device *node = file_to_intel_imgu_node(file);
888 
889 	if (f->type != node->vbq.type)
890 		return -EINVAL;
891 
892 	f->fmt = node->vdev_fmt.fmt;
893 
894 	return 0;
895 }
896 
897 /******************** function pointers ********************/
898 
899 static const struct v4l2_subdev_internal_ops imgu_subdev_internal_ops = {
900 	.open = imgu_subdev_open,
901 };
902 
903 static const struct v4l2_subdev_core_ops imgu_subdev_core_ops = {
904 	.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
905 	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
906 };
907 
908 static const struct v4l2_subdev_video_ops imgu_subdev_video_ops = {
909 	.s_stream = imgu_subdev_s_stream,
910 };
911 
912 static const struct v4l2_subdev_pad_ops imgu_subdev_pad_ops = {
913 	.link_validate = v4l2_subdev_link_validate_default,
914 	.get_fmt = imgu_subdev_get_fmt,
915 	.set_fmt = imgu_subdev_set_fmt,
916 	.get_selection = imgu_subdev_get_selection,
917 	.set_selection = imgu_subdev_set_selection,
918 };
919 
920 static const struct v4l2_subdev_ops imgu_subdev_ops = {
921 	.core = &imgu_subdev_core_ops,
922 	.video = &imgu_subdev_video_ops,
923 	.pad = &imgu_subdev_pad_ops,
924 };
925 
926 static const struct media_entity_operations imgu_media_ops = {
927 	.link_setup = imgu_link_setup,
928 	.link_validate = v4l2_subdev_link_validate,
929 };
930 
931 /****************** vb2_ops of the Q ********************/
932 
933 static const struct vb2_ops imgu_vb2_ops = {
934 	.buf_init = imgu_vb2_buf_init,
935 	.buf_cleanup = imgu_vb2_buf_cleanup,
936 	.buf_queue = imgu_vb2_buf_queue,
937 	.queue_setup = imgu_vb2_queue_setup,
938 	.start_streaming = imgu_vb2_start_streaming,
939 	.stop_streaming = imgu_vb2_stop_streaming,
940 	.wait_prepare = vb2_ops_wait_prepare,
941 	.wait_finish = vb2_ops_wait_finish,
942 };
943 
944 /****************** v4l2_file_operations *****************/
945 
946 static const struct v4l2_file_operations imgu_v4l2_fops = {
947 	.unlocked_ioctl = video_ioctl2,
948 	.open = v4l2_fh_open,
949 	.release = vb2_fop_release,
950 	.poll = vb2_fop_poll,
951 	.mmap = vb2_fop_mmap,
952 };
953 
954 /******************** v4l2_ioctl_ops ********************/
955 
956 static const struct v4l2_ioctl_ops imgu_v4l2_ioctl_ops = {
957 	.vidioc_querycap = imgu_vidioc_querycap,
958 
959 	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
960 	.vidioc_g_fmt_vid_cap_mplane = imgu_vidioc_g_fmt,
961 	.vidioc_s_fmt_vid_cap_mplane = imgu_vidioc_s_fmt,
962 	.vidioc_try_fmt_vid_cap_mplane = imgu_vidioc_try_fmt,
963 
964 	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
965 	.vidioc_g_fmt_vid_out_mplane = imgu_vidioc_g_fmt,
966 	.vidioc_s_fmt_vid_out_mplane = imgu_vidioc_s_fmt,
967 	.vidioc_try_fmt_vid_out_mplane = imgu_vidioc_try_fmt,
968 
969 	/* buffer queue management */
970 	.vidioc_reqbufs = vb2_ioctl_reqbufs,
971 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
972 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
973 	.vidioc_querybuf = vb2_ioctl_querybuf,
974 	.vidioc_qbuf = vb2_ioctl_qbuf,
975 	.vidioc_dqbuf = vb2_ioctl_dqbuf,
976 	.vidioc_streamon = vb2_ioctl_streamon,
977 	.vidioc_streamoff = vb2_ioctl_streamoff,
978 	.vidioc_expbuf = vb2_ioctl_expbuf,
979 };
980 
981 static const struct v4l2_ioctl_ops imgu_v4l2_meta_ioctl_ops = {
982 	.vidioc_querycap = imgu_vidioc_querycap,
983 
984 	/* meta capture */
985 	.vidioc_enum_fmt_meta_cap = imgu_meta_enum_format,
986 	.vidioc_g_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
987 	.vidioc_s_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
988 	.vidioc_try_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
989 
990 	/* meta output */
991 	.vidioc_enum_fmt_meta_out = imgu_meta_enum_format,
992 	.vidioc_g_fmt_meta_out = imgu_vidioc_g_meta_fmt,
993 	.vidioc_s_fmt_meta_out = imgu_vidioc_g_meta_fmt,
994 	.vidioc_try_fmt_meta_out = imgu_vidioc_g_meta_fmt,
995 
996 	.vidioc_reqbufs = vb2_ioctl_reqbufs,
997 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
998 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
999 	.vidioc_querybuf = vb2_ioctl_querybuf,
1000 	.vidioc_qbuf = vb2_ioctl_qbuf,
1001 	.vidioc_dqbuf = vb2_ioctl_dqbuf,
1002 	.vidioc_streamon = vb2_ioctl_streamon,
1003 	.vidioc_streamoff = vb2_ioctl_streamoff,
1004 	.vidioc_expbuf = vb2_ioctl_expbuf,
1005 };
1006 
1007 static int imgu_sd_s_ctrl(struct v4l2_ctrl *ctrl)
1008 {
1009 	struct imgu_v4l2_subdev *imgu_sd =
1010 		container_of(ctrl->handler, struct imgu_v4l2_subdev, ctrl_handler);
1011 	struct imgu_device *imgu = v4l2_get_subdevdata(&imgu_sd->subdev);
1012 	struct device *dev = &imgu->pci_dev->dev;
1013 
1014 	dev_dbg(dev, "set val %d to ctrl 0x%8x for subdev %u",
1015 		ctrl->val, ctrl->id, imgu_sd->pipe);
1016 
1017 	switch (ctrl->id) {
1018 	case V4L2_CID_INTEL_IPU3_MODE:
1019 		atomic_set(&imgu_sd->running_mode, ctrl->val);
1020 		return 0;
1021 	default:
1022 		return -EINVAL;
1023 	}
1024 }
1025 
1026 static const struct v4l2_ctrl_ops imgu_subdev_ctrl_ops = {
1027 	.s_ctrl = imgu_sd_s_ctrl,
1028 };
1029 
1030 static const char * const imgu_ctrl_mode_strings[] = {
1031 	"Video mode",
1032 	"Still mode",
1033 };
1034 
1035 static const struct v4l2_ctrl_config imgu_subdev_ctrl_mode = {
1036 	.ops = &imgu_subdev_ctrl_ops,
1037 	.id = V4L2_CID_INTEL_IPU3_MODE,
1038 	.name = "IPU3 Pipe Mode",
1039 	.type = V4L2_CTRL_TYPE_MENU,
1040 	.max = ARRAY_SIZE(imgu_ctrl_mode_strings) - 1,
1041 	.def = IPU3_RUNNING_MODE_VIDEO,
1042 	.qmenu = imgu_ctrl_mode_strings,
1043 };
1044 
1045 /******************** Framework registration ********************/
1046 
1047 /* Helper function to configure a node's video device properties */
1048 static void imgu_node_to_v4l2(u32 node, struct video_device *vdev,
1049 			      struct v4l2_format *f)
1050 {
1051 	u32 cap;
1052 
1053 	/* Should not happen */
1054 	WARN_ON(node >= IMGU_NODE_NUM);
1055 
1056 	switch (node) {
1057 	case IMGU_NODE_IN:
1058 		cap = V4L2_CAP_VIDEO_OUTPUT_MPLANE;
1059 		f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1060 		vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
1061 		break;
1062 	case IMGU_NODE_PARAMS:
1063 		cap = V4L2_CAP_META_OUTPUT;
1064 		f->type = V4L2_BUF_TYPE_META_OUTPUT;
1065 		f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_PARAMS;
1066 		vdev->ioctl_ops = &imgu_v4l2_meta_ioctl_ops;
1067 		imgu_css_meta_fmt_set(&f->fmt.meta);
1068 		break;
1069 	case IMGU_NODE_STAT_3A:
1070 		cap = V4L2_CAP_META_CAPTURE;
1071 		f->type = V4L2_BUF_TYPE_META_CAPTURE;
1072 		f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_STAT_3A;
1073 		vdev->ioctl_ops = &imgu_v4l2_meta_ioctl_ops;
1074 		imgu_css_meta_fmt_set(&f->fmt.meta);
1075 		break;
1076 	default:
1077 		cap = V4L2_CAP_VIDEO_CAPTURE_MPLANE;
1078 		f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1079 		vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
1080 	}
1081 
1082 	vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | cap;
1083 }
1084 
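/* Register one pipe subdev together with its media pads and pipe mode control. */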
1085 static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
1086 				     struct imgu_v4l2_subdev *imgu_sd,
1087 				     unsigned int pipe)
1088 {
1089 	int i, r;
1090 	struct v4l2_ctrl_handler *hdl = &imgu_sd->ctrl_handler;
1091 	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
1092 
1093 	/* Initialize subdev media entity */
1094 	imgu_sd->subdev.entity.ops = &imgu_media_ops;
1095 	for (i = 0; i < IMGU_NODE_NUM; i++) {
1096 		imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
1097 			MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
1098 	}
1099 	r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
1100 				   imgu_sd->subdev_pads);
1101 	if (r) {
1102 		dev_err(&imgu->pci_dev->dev,
1103 			"failed initialize subdev media entity (%d)\n", r);
1104 		return r;
1105 	}
1106 
1107 	/* Initialize subdev */
1108 	v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
1109 	imgu_sd->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_STATISTICS;
1110 	imgu_sd->subdev.internal_ops = &imgu_subdev_internal_ops;
1111 	imgu_sd->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
1112 				V4L2_SUBDEV_FL_HAS_EVENTS;
1113 	snprintf(imgu_sd->subdev.name, sizeof(imgu_sd->subdev.name),
1114 		 "%s %u", IMGU_NAME, pipe);
1115 	v4l2_set_subdevdata(&imgu_sd->subdev, imgu);
1116 	atomic_set(&imgu_sd->running_mode, IPU3_RUNNING_MODE_VIDEO);
1117 	v4l2_ctrl_handler_init(hdl, 1);
1118 	imgu_sd->subdev.ctrl_handler = hdl;
1119 	imgu_sd->ctrl = v4l2_ctrl_new_custom(hdl, &imgu_subdev_ctrl_mode, NULL);
1120 	if (hdl->error) {
1121 		r = hdl->error;
1122 		dev_err(&imgu->pci_dev->dev,
1123 			"failed to create subdev v4l2 ctrl with err %d", r);
1124 		goto fail_subdev;
1125 	}
1126 	r = v4l2_device_register_subdev(&imgu->v4l2_dev, &imgu_sd->subdev);
1127 	if (r) {
1128 		dev_err(&imgu->pci_dev->dev,
1129 			"failed initialize subdev (%d)\n", r);
1130 		goto fail_subdev;
1131 	}
1132 
1133 	imgu_sd->pipe = pipe;
1134 	return 0;
1135 
1136 fail_subdev:
1137 	v4l2_ctrl_handler_free(imgu_sd->subdev.ctrl_handler);
1138 	media_entity_cleanup(&imgu_sd->subdev.entity);
1139 
1140 	return r;
1141 }
1142 
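/*
 * Set up one video node of a pipe: default formats, media pad, vb2 queue,
 * video device registration and the link to the corresponding subdev pad.
 */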
1143 static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
1144 				int node_num)
1145 {
1146 	int r;
1147 	u32 flags;
1148 	struct v4l2_mbus_framefmt def_bus_fmt = { 0 };
1149 	struct v4l2_pix_format_mplane def_pix_fmt = { 0 };
1150 	struct device *dev = &imgu->pci_dev->dev;
1151 	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
1152 	struct v4l2_subdev *sd = &imgu_pipe->imgu_sd.subdev;
1153 	struct imgu_video_device *node = &imgu_pipe->nodes[node_num];
1154 	struct video_device *vdev = &node->vdev;
1155 	struct vb2_queue *vbq = &node->vbq;
1156 
1157 	/* Initialize formats to default values */
1158 	def_bus_fmt.width = 1920;
1159 	def_bus_fmt.height = 1080;
1160 	def_bus_fmt.code = MEDIA_BUS_FMT_FIXED;
1161 	def_bus_fmt.field = V4L2_FIELD_NONE;
1162 	def_bus_fmt.colorspace = V4L2_COLORSPACE_RAW;
1163 	def_bus_fmt.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1164 	def_bus_fmt.quantization = V4L2_QUANTIZATION_DEFAULT;
1165 	def_bus_fmt.xfer_func = V4L2_XFER_FUNC_DEFAULT;
1166 
1167 	def_pix_fmt.width = def_bus_fmt.width;
1168 	def_pix_fmt.height = def_bus_fmt.height;
1169 	def_pix_fmt.field = def_bus_fmt.field;
1170 	def_pix_fmt.num_planes = 1;
1171 	def_pix_fmt.plane_fmt[0].bytesperline =
1172 		imgu_bytesperline(def_pix_fmt.width,
1173 				  IMGU_ABI_FRAME_FORMAT_RAW_PACKED);
1174 	def_pix_fmt.plane_fmt[0].sizeimage =
1175 		def_pix_fmt.height * def_pix_fmt.plane_fmt[0].bytesperline;
1176 	def_pix_fmt.flags = 0;
1177 	def_pix_fmt.colorspace = def_bus_fmt.colorspace;
1178 	def_pix_fmt.ycbcr_enc = def_bus_fmt.ycbcr_enc;
1179 	def_pix_fmt.quantization = def_bus_fmt.quantization;
1180 	def_pix_fmt.xfer_func = def_bus_fmt.xfer_func;
1181 
1182 	/* Initialize miscellaneous variables */
1183 	mutex_init(&node->lock);
1184 	INIT_LIST_HEAD(&node->buffers);
1185 
1186 	/* Initialize formats to default values */
1187 	node->pad_fmt = def_bus_fmt;
1188 	node->id = node_num;
1189 	node->pipe = pipe;
1190 	imgu_node_to_v4l2(node_num, vdev, &node->vdev_fmt);
1191 	if (node->vdev_fmt.type ==
1192 	    V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
1193 	    node->vdev_fmt.type ==
1194 	    V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
1195 		def_pix_fmt.pixelformat = node->output ?
1196 			V4L2_PIX_FMT_IPU3_SGRBG10 :
1197 			V4L2_PIX_FMT_NV12;
1198 		node->vdev_fmt.fmt.pix_mp = def_pix_fmt;
1199 	}
1200 
1201 	/* Initialize media entities */
1202 	node->vdev_pad.flags = node->output ?
1203 		MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
1204 	vdev->entity.ops = NULL;
1205 	r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
1206 	if (r) {
1207 		dev_err(dev, "failed initialize media entity (%d)\n", r);
1208 		mutex_destroy(&node->lock);
1209 		return r;
1210 	}
1211 
1212 	/* Initialize vbq */
1213 	vbq->type = node->vdev_fmt.type;
1214 	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1215 	vbq->ops = &imgu_vb2_ops;
1216 	vbq->mem_ops = &vb2_dma_sg_memops;
1217 	if (imgu->buf_struct_size <= 0)
1218 		imgu->buf_struct_size =
1219 			sizeof(struct imgu_vb2_buffer);
1220 	vbq->buf_struct_size = imgu->buf_struct_size;
1221 	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1222 	/* can streamon w/o buffers */
1223 	vbq->min_queued_buffers = 0;
1224 	vbq->drv_priv = imgu;
1225 	vbq->lock = &node->lock;
1226 	r = vb2_queue_init(vbq);
1227 	if (r) {
1228 		dev_err(dev, "failed to initialize video queue (%d)", r);
1229 		media_entity_cleanup(&vdev->entity);
1230 		return r;
1231 	}
1232 
1233 	/* Initialize vdev */
1234 	snprintf(vdev->name, sizeof(vdev->name), "%s %u %s",
1235 		 IMGU_NAME, pipe, node->name);
1236 	vdev->release = video_device_release_empty;
1237 	vdev->fops = &imgu_v4l2_fops;
1238 	vdev->lock = &node->lock;
1239 	vdev->v4l2_dev = &imgu->v4l2_dev;
1240 	vdev->queue = &node->vbq;
1241 	vdev->vfl_dir = node->output ? VFL_DIR_TX : VFL_DIR_RX;
1242 	video_set_drvdata(vdev, imgu);
1243 	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1244 	if (r) {
1245 		dev_err(dev, "failed to register video device (%d)", r);
1246 		media_entity_cleanup(&vdev->entity);
1247 		return r;
1248 	}
1249 
1250 	/* Create link between video node and the subdev pad */
1251 	flags = 0;
1252 	if (node->enabled)
1253 		flags |= MEDIA_LNK_FL_ENABLED;
1254 	if (node->output) {
1255 		r = media_create_pad_link(&vdev->entity, 0, &sd->entity,
1256 					  node_num, flags);
1257 	} else {
1258 		if (node->id == IMGU_NODE_OUT) {
1259 			flags |= MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
1260 			node->enabled = true;
1261 		}
1262 
1263 		r = media_create_pad_link(&sd->entity, node_num, &vdev->entity,
1264 					  0, flags);
1265 	}
1266 	if (r) {
1267 		dev_err(dev, "failed to create pad link (%d)", r);
1268 		video_unregister_device(vdev);
1269 		return r;
1270 	}
1271 
1272 	return 0;
1273 }
1274 
1275 static void imgu_v4l2_nodes_cleanup_pipe(struct imgu_device *imgu,
1276 					 unsigned int pipe, int node)
1277 {
1278 	int i;
1279 	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
1280 
1281 	for (i = 0; i < node; i++) {
1282 		video_unregister_device(&imgu_pipe->nodes[i].vdev);
1283 		media_entity_cleanup(&imgu_pipe->nodes[i].vdev.entity);
1284 		mutex_destroy(&imgu_pipe->nodes[i].lock);
1285 	}
1286 }
1287 
1288 static int imgu_v4l2_nodes_setup_pipe(struct imgu_device *imgu, int pipe)
1289 {
1290 	int i;
1291 
1292 	for (i = 0; i < IMGU_NODE_NUM; i++) {
1293 		int r = imgu_v4l2_node_setup(imgu, pipe, i);
1294 
1295 		if (r) {
1296 			imgu_v4l2_nodes_cleanup_pipe(imgu, pipe, i);
1297 			return r;
1298 		}
1299 	}
1300 	return 0;
1301 }
1302 
1303 static void imgu_v4l2_subdev_cleanup(struct imgu_device *imgu, unsigned int i)
1304 {
1305 	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[i];
1306 
1307 	v4l2_device_unregister_subdev(&imgu_pipe->imgu_sd.subdev);
1308 	v4l2_ctrl_handler_free(imgu_pipe->imgu_sd.subdev.ctrl_handler);
1309 	media_entity_cleanup(&imgu_pipe->imgu_sd.subdev.entity);
1310 }
1311 
1312 static void imgu_v4l2_cleanup_pipes(struct imgu_device *imgu, unsigned int pipe)
1313 {
1314 	int i;
1315 
1316 	for (i = 0; i < pipe; i++) {
1317 		imgu_v4l2_nodes_cleanup_pipe(imgu, i, IMGU_NODE_NUM);
1318 		imgu_v4l2_subdev_cleanup(imgu, i);
1319 	}
1320 }
1321 
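/* Register the subdev and all video nodes of every ImgU pipe. */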
1322 static int imgu_v4l2_register_pipes(struct imgu_device *imgu)
1323 {
1324 	struct imgu_media_pipe *imgu_pipe;
1325 	int i, r;
1326 
1327 	for (i = 0; i < IMGU_MAX_PIPE_NUM; i++) {
1328 		imgu_pipe = &imgu->imgu_pipe[i];
1329 		r = imgu_v4l2_subdev_register(imgu, &imgu_pipe->imgu_sd, i);
1330 		if (r) {
1331 			dev_err(&imgu->pci_dev->dev,
1332 				"failed to register subdev%u ret (%d)\n", i, r);
1333 			goto pipes_cleanup;
1334 		}
1335 		r = imgu_v4l2_nodes_setup_pipe(imgu, i);
1336 		if (r) {
1337 			imgu_v4l2_subdev_cleanup(imgu, i);
1338 			goto pipes_cleanup;
1339 		}
1340 	}
1341 
1342 	return 0;
1343 
1344 pipes_cleanup:
1345 	imgu_v4l2_cleanup_pipes(imgu, i);
1346 	return r;
1347 }
1348 
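/* Register the media device, the V4L2 device and all ImgU pipes. */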
1349 int imgu_v4l2_register(struct imgu_device *imgu)
1350 {
1351 	int r;
1352 
1353 	/* Initialize miscellaneous variables */
1354 	imgu->streaming = false;
1355 
1356 	/* Set up media device */
1357 	media_device_pci_init(&imgu->media_dev, imgu->pci_dev, IMGU_NAME);
1358 
1359 	/* Set up v4l2 device */
1360 	imgu->v4l2_dev.mdev = &imgu->media_dev;
1361 	imgu->v4l2_dev.ctrl_handler = NULL;
1362 	r = v4l2_device_register(&imgu->pci_dev->dev, &imgu->v4l2_dev);
1363 	if (r) {
1364 		dev_err(&imgu->pci_dev->dev,
1365 			"failed to register V4L2 device (%d)\n", r);
1366 		goto fail_v4l2_dev;
1367 	}
1368 
1369 	r = imgu_v4l2_register_pipes(imgu);
1370 	if (r) {
1371 		dev_err(&imgu->pci_dev->dev,
1372 			"failed to register pipes (%d)\n", r);
1373 		goto fail_v4l2_pipes;
1374 	}
1375 
1376 	r = v4l2_device_register_subdev_nodes(&imgu->v4l2_dev);
1377 	if (r) {
1378 		dev_err(&imgu->pci_dev->dev,
1379 			"failed to register subdevs (%d)\n", r);
1380 		goto fail_subdevs;
1381 	}
1382 
1383 	r = media_device_register(&imgu->media_dev);
1384 	if (r) {
1385 		dev_err(&imgu->pci_dev->dev,
1386 			"failed to register media device (%d)\n", r);
1387 		goto fail_subdevs;
1388 	}
1389 
1390 	return 0;
1391 
1392 fail_subdevs:
1393 	imgu_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
1394 fail_v4l2_pipes:
1395 	v4l2_device_unregister(&imgu->v4l2_dev);
1396 fail_v4l2_dev:
1397 	media_device_cleanup(&imgu->media_dev);
1398 
1399 	return r;
1400 }
1401 
1402 int imgu_v4l2_unregister(struct imgu_device *imgu)
1403 {
1404 	media_device_unregister(&imgu->media_dev);
1405 	imgu_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
1406 	v4l2_device_unregister(&imgu->v4l2_dev);
1407 	media_device_cleanup(&imgu->media_dev);
1408 
1409 	return 0;
1410 }
1411 
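/* Remove a completed buffer from its node's list and hand it back to vb2. */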
1412 void imgu_v4l2_buffer_done(struct vb2_buffer *vb,
1413 			   enum vb2_buffer_state state)
1414 {
1415 	struct imgu_vb2_buffer *b =
1416 		container_of(vb, struct imgu_vb2_buffer, vbb.vb2_buf);
1417 
1418 	list_del(&b->list);
1419 	vb2_buffer_done(&b->vbb.vb2_buf, state);
1420 }
1421