/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
/*
 * This file contains entry functions for memory management of the ISP driver.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/highmem.h>	/* for kmap */
#include <linux/io.h>		/* for page_to_phys */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include <asm/cacheflush.h>
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"

struct hmm_bo_device bo_device;
struct hmm_pool dynamic_pool;
struct hmm_pool reserved_pool;
static ia_css_ptr dummy_ptr;
static bool hmm_initialized;
struct _hmm_mem_stat hmm_mem_stat;

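/*
 * One character per hmm_bo_type value, used as the "type" column in the
 * sysfs dumps below; going by the HMM_BO_* names these are private,
 * shared, user and ion buffers.
 */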
static const char hmm_bo_type_string[] = "psui";

static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
		       char *buf, struct list_head *bo_list, bool active)
{
	ssize_t ret = 0;
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int i;
	long total[HMM_BO_LAST] = { 0 };
	long count[HMM_BO_LAST] = { 0 };
	int index1 = 0;
	int index2 = 0;

	ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
	if (ret <= 0)
		return 0;
	index1 += ret;

	spin_lock_irqsave(&bo_device.list_lock, flags);
	list_for_each_entry(bo, bo_list, list) {
		if ((active && (bo->status & HMM_BO_ALLOCED)) ||
		    (!active && !(bo->status & HMM_BO_ALLOCED))) {
			ret = scnprintf(buf + index1, PAGE_SIZE - index1,
					"%c %d\n",
					hmm_bo_type_string[bo->type], bo->pgnr);
			total[bo->type] += bo->pgnr;
			count[bo->type]++;
			if (ret > 0)
				index1 += ret;
		}
	}
	spin_unlock_irqrestore(&bo_device.list_lock, flags);

	for (i = 0; i < HMM_BO_LAST; i++) {
		if (count[i]) {
			ret = scnprintf(buf + index1 + index2,
					PAGE_SIZE - index1 - index2,
					"%ld %c buffer objects: %ld KB\n",
					count[i], hmm_bo_type_string[i],
					total[i] * 4);
			if (ret > 0)
				index2 += ret;
		}
	}

	/* Add trailing zero, not included by scnprintf */
	return index1 + index2 + 1;
}

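/*
 * Reading "active_bo" thus produces one "<type> <pgnr>" line per allocated
 * buffer object plus a per-type summary. With two private buffers of 256
 * and 16 pages the output would resemble (illustrative values only,
 * assuming 4 KB pages):
 *
 *	type pgnr
 *	p 256
 *	p 16
 *	2 p buffer objects: 1088 KB
 */
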
static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
}

static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}

static ssize_t reserved_pool_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	ssize_t ret;
	struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
	unsigned long flags;

	if (!pinfo || !pinfo->initialized)
		return 0;

	spin_lock_irqsave(&pinfo->list_lock, flags);
	ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
			pinfo->index, pinfo->pgnr);
	spin_unlock_irqrestore(&pinfo->list_lock, flags);

	if (ret > 0)
		ret++; /* Add trailing zero, not included by scnprintf */

	return ret;
}

static ssize_t dynamic_pool_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t ret;
	struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
	unsigned long flags;

	if (!pinfo || !pinfo->initialized)
		return 0;

	spin_lock_irqsave(&pinfo->list_lock, flags);
	ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
			pinfo->pgnr, pinfo->pool_size);
	spin_unlock_irqrestore(&pinfo->list_lock, flags);

	if (ret > 0)
		ret++; /* Add trailing zero, not included by scnprintf */

	return ret;
}

static DEVICE_ATTR(active_bo, 0444, active_bo_show, NULL);
static DEVICE_ATTR(free_bo, 0444, free_bo_show, NULL);
static DEVICE_ATTR(reserved_pool, 0444, reserved_pool_show, NULL);
static DEVICE_ATTR(dynamic_pool, 0444, dynamic_pool_show, NULL);

static struct attribute *sysfs_attrs_ctrl[] = {
	&dev_attr_active_bo.attr,
	&dev_attr_free_bo.attr,
	&dev_attr_reserved_pool.attr,
	&dev_attr_dynamic_pool.attr,
	NULL
};

static struct attribute_group atomisp_attribute_group[] = {
	{.attrs = sysfs_attrs_ctrl },
};

int hmm_init(void)
{
	int ret;

	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
				 ISP_VM_START, ISP_VM_SIZE);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

	hmm_initialized = true;

	/*
	 * hmm uses a NULL (0) ia_css_ptr to flag an invalid ISP virtual
	 * address, but ISP_VM_START is defined as 0 as well. Allocate one
	 * dummy page up front so that it occupies address 0 and no later
	 * hmm_alloc() call can return 0 for a valid allocation.
	 */
	dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);

	if (!ret) {
		ret = sysfs_create_group(&atomisp_dev->kobj,
					 atomisp_attribute_group);
		if (ret)
			dev_err(atomisp_dev,
				"%s Failed to create sysfs\n", __func__);
	}

	return ret;
}

void hmm_cleanup(void)
{
	sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);

	/* free dummy memory first */
	hmm_free(dummy_ptr);
	dummy_ptr = 0;

	hmm_bo_device_exit(&bo_device);
	hmm_initialized = false;
}

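/*
 * Illustrative bring-up/tear-down order, as a sketch only (the real
 * probe/remove wiring lives elsewhere in the atomisp driver):
 *
 *	ret = hmm_init();
 *	if (ret)
 *		goto err;
 *	...
 *	hmm_cleanup();
 */
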
ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
		     int from_highmem, void *userptr, bool cached)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	/*
	 * Check if we are initialized. In the ideal world we wouldn't need
	 * this, but it can be dropped once the driver is a lot cleaner.
	 */
	if (!hmm_initialized)
		hmm_init();

	/* Get the page number from the size */
	pgnr = size_to_pgnr_ceil(bytes);

	/* Initialize the buffer object structure */
	bo = hmm_bo_alloc(&bo_device, pgnr);
	if (!bo) {
		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
		goto create_bo_err;
	}

	/* Allocate pages for the buffer */
	ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Bind the virtual address and the pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	hmm_mem_stat.tol_cnt += pgnr;

	return bo->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_release(bo);
create_bo_err:
	return 0;
}

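/*
 * Minimal usage sketch (illustrative only): allocate one uncached page of
 * driver-private memory and release it again.
 *
 *	ia_css_ptr addr = hmm_alloc(PAGE_SIZE, HMM_BO_PRIVATE, 0, NULL,
 *				    HMM_UNCACHED);
 *	if (addr)
 *		hmm_free(addr);
 */
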
void hmm_free(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			(unsigned int)virt);
		return;
	}

	hmm_mem_stat.tol_cnt -= bo->pgnr;

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_release(bo);
}

static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			ptr);
		return -EINVAL;
	}

	if (!hmm_bo_page_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no page allocated.\n");
		return -EINVAL;
	}

	if (!hmm_bo_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no virtual address space allocated.\n");
		return -EINVAL;
	}

	return 0;
}

/* Read function in ISP memory management */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
				  unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	des = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		src = (char *)kmap(bo->page_obj[idx].page) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for next loop */

		if (des) {
			memcpy(des, src, len);
			des += len;
		}

		clflush_cache_range(src, len);
		kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

/* Read function in ISP memory management */
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		/* read through the attached vmap address to avoid a remap */
		void *src = bo->vmap_addr;

		src += (virt - bo->start);
		if (data)	/* hmm_flush() passes data == NULL */
			memcpy(data, src, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(src, bytes);
	} else {
		void *vptr = hmm_bo_vmap(bo, true);

		if (!vptr)
			return load_and_flush_by_kmap(virt, data, bytes);

		vptr = vptr + (virt - bo->start);
		if (data)
			memcpy(data, vptr, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
	}

	return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
	if (!data) {
		dev_err(atomisp_dev,
			"hmm_load NULL argument\n");
		return -EINVAL;
	}
	return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
	return load_and_flush(virt, NULL, bytes);
}

/* Write function in ISP memory management */
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		/* write through the attached vmap address to avoid a remap */
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memcpy(dst, data, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		return 0;
	}

	{
		void *vptr = hmm_bo_vmap(bo, true);

		if (vptr) {
			vptr = vptr + (virt - bo->start);
			memcpy(vptr, data, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	/* fall back to page-by-page kmap when vmap fails */
	src = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		if (in_atomic())
			des = (char *)kmap_atomic(bo->page_obj[idx].page);
		else
			des = (char *)kmap(bo->page_obj[idx].page);

		if (!des) {
			dev_err(atomisp_dev,
				"kmap buffer object page failed: pg_idx = %d\n",
				idx);
			return -EINVAL;
		}

		des += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for the next iteration */

		memcpy(des, src, len);
		src += len;
		clflush_cache_range(des, len);

		if (in_atomic())
			/*
			 * Note: kunmap_atomic requires return addr from
			 * kmap_atomic, not the page. See linux/highmem.h
			 */
			kunmap_atomic(des - offset);
		else
			kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

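/*
 * Illustrative round trip through ISP memory (error handling elided;
 * src_buf and dst_buf are hypothetical 64-byte host buffers):
 *
 *	ia_css_ptr p = hmm_alloc(64, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);
 *	hmm_store(p, src_buf, 64);
 *	hmm_load(p, dst_buf, 64);
 *	hmm_free(p);
 */
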
/* memset function in ISP memory management */
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		/* write through the attached vmap address to avoid a remap */
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memset(dst, c, bytes);

		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		return 0;
	}

	{
		void *vptr = hmm_bo_vmap(bo, true);

		if (vptr) {
			vptr = vptr + (virt - bo->start);
			memset(vptr, c, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	/* fall back to page-by-page kmap when vmap fails */
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap(bo->page_obj[idx].page) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memset(des, c, len);
		clflush_cache_range(des, len);
		kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

/* Virtual address to physical address conversion */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
	unsigned int idx, offset;
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return -1;
	}

	idx = (virt - bo->start) >> PAGE_SHIFT;
	offset = (virt - bo->start) - (idx << PAGE_SHIFT);

	return page_to_phys(bo->page_obj[idx].page) + offset;
}

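/*
 * Worked example of the index/offset math above, assuming 4 KB pages
 * (PAGE_SHIFT == 12): for virt - bo->start == 0x1234, idx is
 * 0x1234 >> 12 == 1 and offset is 0x1234 - (1 << 12) == 0x234, so the
 * result is the physical address of the bo's second page plus 0x234.
 */
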
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			virt);
		return -EINVAL;
	}

	return hmm_bo_mmap(vma, bo);
}

/* Map an ISP virtual address into an IA (host) virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
	struct hmm_buffer_object *bo;
	void *ptr;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (!ptr)
		return NULL;

	return ptr + (virt - bo->start);
}

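/*
 * Callers pair this with hmm_vunmap() below, flushing first when the
 * mapping is cached (sketch only; isp_addr, host_buf and len are
 * hypothetical):
 *
 *	void *va = hmm_vmap(isp_addr, true);	// cached mapping
 *	if (va) {
 *		memcpy(va, host_buf, len);
 *		hmm_flush_vmap(isp_addr);	// flush the cached lines
 *		hmm_vunmap(isp_addr);
 *	}
 */
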
/* Flush memory that was mapped as cached through hmm_vmap() */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}

int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type)
{
	switch (pool_type) {
	case HMM_POOL_TYPE_RESERVED:
		reserved_pool.pops = &reserved_pops;
		return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
						     pool_size);
	case HMM_POOL_TYPE_DYNAMIC:
		dynamic_pool.pops = &dynamic_pops;
		return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
						    pool_size);
	default:
		dev_err(atomisp_dev, "invalid pool type.\n");
		return -EINVAL;
	}
}

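/*
 * Typical bring-up/tear-down of the pools, paired with
 * hmm_pool_unregister() below (page counts are illustrative):
 *
 *	hmm_pool_register(1024, HMM_POOL_TYPE_RESERVED);
 *	hmm_pool_register(2048, HMM_POOL_TYPE_DYNAMIC);
 *	...
 *	hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC);
 *	hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
 */
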
void hmm_pool_unregister(enum hmm_pool_type pool_type)
{
	switch (pool_type) {
	case HMM_POOL_TYPE_RESERVED:
		if (reserved_pool.pops && reserved_pool.pops->pool_exit)
			reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
		break;
	case HMM_POOL_TYPE_DYNAMIC:
		if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
			dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
		break;
	default:
		dev_err(atomisp_dev, "invalid pool type.\n");
		break;
	}
}

void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
{
	return hmm_vmap(ptr, cached);
	/* vmunmap will be done in hmm_bo_release() */
}

ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
	if (bo)
		return bo->start;

	dev_err(atomisp_dev,
		"cannot find buffer object whose kernel virtual address is %p\n",
		ptr);
	return 0;
}

void hmm_show_mem_stat(const char *func, const int line)
{
	trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
		     hmm_mem_stat.tol_cnt,
		     hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
		     hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
		     hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
}

void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
{
	hmm_mem_stat.res_size = res_pgnr;
	/* If the reserved memory pool is not enabled, flag its stats as -1. */
	if (hmm_mem_stat.res_size == 0) {
		hmm_mem_stat.res_size = -1;
		hmm_mem_stat.res_cnt = -1;
	}

	/* Likewise for the dynamic memory pool. */
	if (!dyc_en) {
		hmm_mem_stat.dyc_size = -1;
		hmm_mem_stat.dyc_thr = -1;
	} else {
		hmm_mem_stat.dyc_size = 0;
		hmm_mem_stat.dyc_thr = dyc_pgnr;
	}
	hmm_mem_stat.usr_size = 0;
	hmm_mem_stat.sys_size = 0;
	hmm_mem_stat.tol_cnt = 0;
}