// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * This file contains the entry functions for the memory management of
 * the ISP driver.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* for kmap */
#include <linux/io.h>		/* for page_to_phys */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
34 #include "asm/cacheflush.h"
35 #include "mmu/isp_mmu.h"
36 #include "mmu/sh_mmu_mrfld.h"
37 
38 struct hmm_bo_device bo_device;
39 static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
40 static bool hmm_initialized;
41 
/*
 * Single-character code for each hmm_bo_type, indexed by bo->type and
 * used in the sysfs output below:
 * p: private
 * v: vmalloc
 */
static const char hmm_bo_type_string[] = "pv";

static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
		       char *buf, struct list_head *bo_list, bool active)
{
	ssize_t ret = 0;
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int i;
	long total[HMM_BO_LAST] = { 0 };
	long count[HMM_BO_LAST] = { 0 };
	int index1 = 0;
	int index2 = 0;

	ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
	if (ret <= 0)
		return 0;

	index1 += ret;

	spin_lock_irqsave(&bo_device.list_lock, flags);
	list_for_each_entry(bo, bo_list, list) {
		if ((active && (bo->status & HMM_BO_ALLOCED)) ||
		    (!active && !(bo->status & HMM_BO_ALLOCED))) {
			ret = scnprintf(buf + index1, PAGE_SIZE - index1,
					"%c %d\n",
					hmm_bo_type_string[bo->type], bo->pgnr);

			total[bo->type] += bo->pgnr;
			count[bo->type]++;
			if (ret > 0)
				index1 += ret;
		}
	}
	spin_unlock_irqrestore(&bo_device.list_lock, flags);

	for (i = 0; i < HMM_BO_LAST; i++) {
		if (count[i]) {
			ret = scnprintf(buf + index1 + index2,
					PAGE_SIZE - index1 - index2,
					"%ld %c buffer objects: %ld KB\n",
					count[i], hmm_bo_type_string[i],
					total[i] * 4);
			if (ret > 0)
				index2 += ret;
		}
	}

	/* Add trailing zero, not included by scnprintf */
	return index1 + index2 + 1;
}

static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
}

static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}

static DEVICE_ATTR_RO(active_bo);
static DEVICE_ATTR_RO(free_bo);

static struct attribute *sysfs_attrs_ctrl[] = {
	&dev_attr_active_bo.attr,
	&dev_attr_free_bo.attr,
	NULL
};

static struct attribute_group atomisp_attribute_group[] = {
	{.attrs = sysfs_attrs_ctrl },
};
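
/*
 * Illustrative sample only (values invented for the example): with two
 * allocated private buffer objects of 64 and 16 pages, reading the
 * "active_bo" sysfs file would produce something like:
 *
 *	type pgnr
 *	p 64
 *	p 16
 *	2 p buffer objects: 320 KB
 */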

int hmm_init(void)
{
	int ret;

	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
				 ISP_VM_START, ISP_VM_SIZE);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

	hmm_initialized = true;

	/*
	 * hmm uses a NULL ISP virtual address to mean "invalid", but
	 * ISP_VM_START is defined as 0 as well. Allocate one piece of
	 * dummy memory up front, which takes the address 0, so that
	 * no later hmm_alloc() call can return 0.
	 */
	dummy_ptr = hmm_alloc(1);

	if (!ret) {
		ret = sysfs_create_group(&atomisp_dev->kobj,
					 atomisp_attribute_group);
		if (ret)
			dev_err(atomisp_dev,
				"%s Failed to create sysfs\n", __func__);
	}

	return ret;
}

void hmm_cleanup(void)
{
	if (dummy_ptr == mmgr_EXCEPTION)
		return;
	sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);

	/* free dummy memory first */
	hmm_free(dummy_ptr);
	dummy_ptr = 0;

	hmm_bo_device_exit(&bo_device);
	hmm_initialized = false;
}

static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
			      void *vmalloc_addr)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	/*
	 * Check if we are initialized. In the ideal world we wouldn't need
	 * this, but we can tackle it once the driver is a lot cleaner.
	 */
	if (!hmm_initialized)
		hmm_init();

	/* Get the page number from the size */
	pgnr = size_to_pgnr_ceil(bytes);
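	/* e.g. with 4 KiB pages, a 5000 byte request rounds up to pgnr == 2 */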

	/* Buffer object structure init */
	bo = hmm_bo_alloc(&bo_device, pgnr);
	if (!bo) {
		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
		goto create_bo_err;
	}

	/* Allocate pages for memory */
	ret = hmm_bo_alloc_pages(bo, type, vmalloc_addr);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Combine the virtual address and pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	return bo->start;

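	/* Error unwind: undo the successful steps above in reverse order */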
bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_unref(bo);
create_bo_err:
	return 0;
}

ia_css_ptr hmm_alloc(size_t bytes)
{
	return __hmm_alloc(bytes, HMM_BO_PRIVATE, NULL);
}

ia_css_ptr hmm_create_from_vmalloc_buf(size_t bytes, void *vmalloc_addr)
{
	return __hmm_alloc(bytes, HMM_BO_VMALLOC, vmalloc_addr);
}
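
/*
 * Usage sketch (hypothetical caller, for illustration only): allocations
 * return 0 on failure and are paired with hmm_free():
 *
 *	ia_css_ptr addr = hmm_alloc(len);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	hmm_store(addr, data, len);
 *	...
 *	hmm_free(addr);
 */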

void hmm_free(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	if (WARN_ON(virt == mmgr_EXCEPTION))
		return;

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);

	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object starting at address 0x%x\n",
			(unsigned int)virt);
		return;
	}

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_unref(bo);
}

static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object containing address 0x%x\n",
			ptr);
		return -EINVAL;
	}

	if (!hmm_bo_page_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no pages allocated.\n");
		return -EINVAL;
	}

	if (!hmm_bo_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no virtual address space allocated.\n");
		return -EINVAL;
	}

	return 0;
}

/* Read helper for ISP memory management, copying one page at a time */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
				  unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	des = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);
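		/*
		 * Example: with a 4096 byte page size, virt - bo->start ==
		 * 5000 yields idx == 1 and offset == 904, i.e. byte 904 of
		 * the second page backing this buffer object.
		 */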

		src = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for next loop */

		if (des) {
			memcpy(des, src, len);
			des += len;
		}

		clflush_cache_range(src, len);

		kunmap_local(src);
	}

	return 0;
}

/* Read function in ISP memory management */
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *src = bo->vmap_addr;

		src += (virt - bo->start);
		/* data is NULL when called via hmm_flush(), flush only */
		if (data)
			memcpy(data, src, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(src, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (!vptr)
			return load_and_flush_by_kmap(virt, data, bytes);

		vptr = vptr + (virt - bo->start);
		if (data)
			memcpy(data, vptr, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
	}

	return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
	if (!virt) {
		dev_warn(atomisp_dev,
			 "hmm_load: address is NULL\n");
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev,
			"hmm_load: data is a NULL argument\n");
		return -EINVAL;
	}
	return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
	return load_and_flush(virt, NULL, bytes);
}

/* Write function in ISP memory management */
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	if (!virt) {
		dev_warn(atomisp_dev,
			 "hmm_store: address is NULL\n");
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev,
			"hmm_store: data is a NULL argument\n");
		return -EINVAL;
	}

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memcpy(dst, data, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);

			memcpy(vptr, data, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	src = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		/* kmap_local_page() cannot fail, no NULL check needed */
		des = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memcpy(des, src, len);

		src += len;

		clflush_cache_range(des, len);

		kunmap_local(des);
	}

	return 0;
}

/* memset function in ISP memory management */
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memset(dst, c, bytes);

		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);
			memset(vptr, c, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memset(des, c, len);

		clflush_cache_range(des, len);

		kunmap_local(des);
	}

	return 0;
}

/* Convert an ISP virtual address to a physical address */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
	unsigned int idx, offset;
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object containing address 0x%x\n",
			virt);
		return -1;
	}

	idx = (virt - bo->start) >> PAGE_SHIFT;
	offset = (virt - bo->start) - (idx << PAGE_SHIFT);

	return page_to_phys(bo->pages[idx]) + offset;
}

int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object starting at address 0x%x\n",
			virt);
		return -EINVAL;
	}

	return hmm_bo_mmap(vma, bo);
}

/* Map an ISP virtual address into an IA virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
	struct hmm_buffer_object *bo;
	void *ptr;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object containing address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (!ptr)
		return NULL;

	return ptr + (virt - bo->start);
}
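
/*
 * Usage sketch (hypothetical caller, for illustration only): pair
 * hmm_vmap() with hmm_vunmap(), flushing in between when the mapping
 * was requested cached:
 *
 *	void *va = hmm_vmap(isp_addr, true);
 *
 *	if (va) {
 *		memset(va, 0, len);
 *		hmm_flush_vmap(isp_addr);
 *		hmm_vunmap(isp_addr);
 *	}
 */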

/* Flush memory that was mapped as cached through hmm_vmap() */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "can not find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "can not find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}