/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */
/*
 * This file contains the entry functions for the memory management
 * of the ISP driver.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>      /* for kmap */
#include <linux/io.h>           /* for page_to_phys */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include <asm/cacheflush.h>
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"

struct hmm_bo_device bo_device;
struct hmm_pool dynamic_pool;
struct hmm_pool reserved_pool;
static ia_css_ptr dummy_ptr;
static bool hmm_initialized;
struct _hmm_mem_stat hmm_mem_stat;

/*
 * p: private
 * s: shared
 * u: user
 * i: ion
 */
static const char hmm_bo_type_string[] = "psui";

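/*
 * Write into a sysfs buffer one "<type> <pgnr>" line for every buffer
 * object on @bo_list whose HMM_BO_ALLOCED state matches @active, followed
 * by a per-type summary. The list is walked under bo_device.list_lock.
 */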
static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
                       char *buf, struct list_head *bo_list, bool active)
{
        ssize_t ret = 0;
        struct hmm_buffer_object *bo;
        unsigned long flags;
        int i;
        long total[HMM_BO_LAST] = { 0 };
        long count[HMM_BO_LAST] = { 0 };
        int index1 = 0;
        int index2 = 0;

        ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
        if (ret <= 0)
                return 0;

        index1 += ret;

        spin_lock_irqsave(&bo_device.list_lock, flags);
        list_for_each_entry(bo, bo_list, list) {
                if ((active && (bo->status & HMM_BO_ALLOCED)) ||
                    (!active && !(bo->status & HMM_BO_ALLOCED))) {
                        ret = scnprintf(buf + index1, PAGE_SIZE - index1,
                                        "%c %d\n",
                                        hmm_bo_type_string[bo->type], bo->pgnr);

                        total[bo->type] += bo->pgnr;
                        count[bo->type]++;
                        if (ret > 0)
                                index1 += ret;
                }
        }
        spin_unlock_irqrestore(&bo_device.list_lock, flags);

        for (i = 0; i < HMM_BO_LAST; i++) {
                if (count[i]) {
                        ret = scnprintf(buf + index1 + index2,
                                        PAGE_SIZE - index1 - index2,
                                        "%ld %c buffer objects: %ld KB\n",
                                        count[i], hmm_bo_type_string[i],
                                        total[i] * 4);
                        if (ret > 0)
                                index2 += ret;
                }
        }

        /* Add trailing zero, not included by scnprintf */
        return index1 + index2 + 1;
}

static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
}

static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}

static ssize_t reserved_pool_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        ssize_t ret = 0;

        struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
        unsigned long flags;

        if (!pinfo || !pinfo->initialized)
                return 0;

        spin_lock_irqsave(&pinfo->list_lock, flags);
        ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
                        pinfo->index, pinfo->pgnr);
        spin_unlock_irqrestore(&pinfo->list_lock, flags);

        if (ret > 0)
                ret++; /* Add trailing zero, not included by scnprintf */

        return ret;
}

static ssize_t dynamic_pool_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        ssize_t ret = 0;

        struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
        unsigned long flags;

        if (!pinfo || !pinfo->initialized)
                return 0;

        spin_lock_irqsave(&pinfo->list_lock, flags);
        ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
                        pinfo->pgnr, pinfo->pool_size);
        spin_unlock_irqrestore(&pinfo->list_lock, flags);

        if (ret > 0)
                ret++; /* Add trailing zero, not included by scnprintf */

        return ret;
}

static DEVICE_ATTR(active_bo, 0444, active_bo_show, NULL);
static DEVICE_ATTR(free_bo, 0444, free_bo_show, NULL);
static DEVICE_ATTR(reserved_pool, 0444, reserved_pool_show, NULL);
static DEVICE_ATTR(dynamic_pool, 0444, dynamic_pool_show, NULL);

static struct attribute *sysfs_attrs_ctrl[] = {
        &dev_attr_active_bo.attr,
        &dev_attr_free_bo.attr,
        &dev_attr_reserved_pool.attr,
        &dev_attr_dynamic_pool.attr,
        NULL
};

static struct attribute_group atomisp_attribute_group[] = {
        {.attrs = sysfs_attrs_ctrl },
};

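/*
 * Set up the ISP memory manager: initialize the buffer-object device over
 * the ISP virtual address space, reserve address 0 with a dummy allocation
 * and expose the sysfs statistics attributes defined above.
 */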
int hmm_init(void)
{
        int ret;

        ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
                                 ISP_VM_START, ISP_VM_SIZE);
        if (ret)
                dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

        hmm_initialized = true;

        /*
         * hmm uses 0 (NULL) to mark an invalid ISP virtual address, but
         * ISP_VM_START is also defined as 0. Allocate one dummy buffer
         * first so that it occupies address 0 and no later hmm_alloc()
         * can return 0 for a valid allocation.
         */
        dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);

        if (!ret) {
                ret = sysfs_create_group(&atomisp_dev->kobj,
                                         atomisp_attribute_group);
                if (ret)
                        dev_err(atomisp_dev,
                                "%s Failed to create sysfs\n", __func__);
        }

        return ret;
}

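/* Tear down in reverse order: sysfs group, dummy buffer, then the bo device. */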
void hmm_cleanup(void)
{
        sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);

        /* free dummy memory first */
        hmm_free(dummy_ptr);
        dummy_ptr = 0;

        hmm_bo_device_exit(&bo_device);
        hmm_initialized = false;
}

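/*
 * Allocate @bytes of ISP memory: reserve an ISP virtual address range,
 * back it with pages of the given @type and bind the two in the ISP MMU.
 * Returns the ISP virtual address of the buffer, or 0 on failure.
 */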
ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
                     int from_highmem, void *userptr, bool cached)
{
        unsigned int pgnr;
        struct hmm_buffer_object *bo;
        int ret;

        /*
         * Check whether we are initialized. In an ideal world we wouldn't
         * need this, but it can be tackled once the driver is a lot cleaner.
         */
        if (!hmm_initialized)
                hmm_init();
        /* Get the page number from the size */
        pgnr = size_to_pgnr_ceil(bytes);

        /* Initialize the buffer object structure */
        bo = hmm_bo_alloc(&bo_device, pgnr);
        if (!bo) {
                dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
                goto create_bo_err;
        }

        /* Allocate pages for memory */
        ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
        if (ret) {
                dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
                goto alloc_page_err;
        }

        /* Bind the virtual address and the pages together */
        ret = hmm_bo_bind(bo);
        if (ret) {
                dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
                goto bind_err;
        }

        hmm_mem_stat.tol_cnt += pgnr;

        return bo->start;

bind_err:
        hmm_bo_free_pages(bo);
alloc_page_err:
        hmm_bo_unref(bo);
create_bo_err:
        return 0;
}

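/*
 * Free a buffer previously returned by hmm_alloc(): unbind it from the
 * ISP MMU, release its pages and drop the buffer object reference.
 */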
void hmm_free(ia_css_ptr virt)
{
        struct hmm_buffer_object *bo;

        WARN_ON(!virt);

        bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);

        if (!bo) {
                dev_err(atomisp_dev,
                        "cannot find buffer object starting with address 0x%x\n",
                        (unsigned int)virt);
                return;
        }

        hmm_mem_stat.tol_cnt -= bo->pgnr;

        hmm_bo_unbind(bo);
        hmm_bo_free_pages(bo);
        hmm_bo_unref(bo);
}

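/* Sanity-check a looked-up buffer object before it is read or written. */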
static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
        if (!bo) {
                dev_err(atomisp_dev,
                        "cannot find buffer object containing address 0x%x\n",
                        ptr);
                return -EINVAL;
        }

        if (!hmm_bo_page_allocated(bo)) {
                dev_err(atomisp_dev,
                        "buffer object has no page allocated.\n");
                return -EINVAL;
        }

        if (!hmm_bo_allocated(bo)) {
                dev_err(atomisp_dev,
                        "buffer object has no virtual address space allocated.\n");
                return -EINVAL;
        }

        return 0;
}

/*
 * Read function in ISP memory management: fallback path that copies (and
 * flushes) the range one page at a time through kmap().
 */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
                                  unsigned int bytes)
{
        struct hmm_buffer_object *bo;
        unsigned int idx, offset, len;
        char *src, *des;
        int ret;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        ret = hmm_check_bo(bo, virt);
        if (ret)
                return ret;

        des = (char *)data;
        while (bytes) {
                idx = (virt - bo->start) >> PAGE_SHIFT;
                offset = (virt - bo->start) - (idx << PAGE_SHIFT);

                src = (char *)kmap(bo->page_obj[idx].page) + offset;

                if ((bytes + offset) >= PAGE_SIZE) {
                        len = PAGE_SIZE - offset;
                        bytes -= len;
                } else {
                        len = bytes;
                        bytes = 0;
                }

                virt += len;    /* update virt for next loop */

                if (des) {
                        memcpy(des, src, len);
                        des += len;
                }

                clflush_cache_range(src, len);

                kunmap(bo->page_obj[idx].page);
        }

        return 0;
}

/*
 * Read function in ISP memory management: vmap fast path, with a kmap
 * fallback when the buffer cannot be vmapped. @data may be NULL when
 * only a cache flush is wanted (see hmm_flush()).
 */
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
        struct hmm_buffer_object *bo;
        int ret;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        ret = hmm_check_bo(bo, virt);
        if (ret)
                return ret;

        if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
                void *src = bo->vmap_addr;

                src += (virt - bo->start);
                if (data)
                        memcpy(data, src, bytes);
                if (bo->status & HMM_BO_VMAPED_CACHED)
                        clflush_cache_range(src, bytes);
        } else {
                void *vptr;

                vptr = hmm_bo_vmap(bo, true);
                if (!vptr)
                        return load_and_flush_by_kmap(virt, data, bytes);

                vptr = vptr + (virt - bo->start);
                if (data)
                        memcpy(data, vptr, bytes);
                clflush_cache_range(vptr, bytes);
                hmm_bo_vunmap(bo);
        }

        return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
        if (!data) {
                dev_err(atomisp_dev,
                        "hmm_load NULL argument\n");
                return -EINVAL;
        }
        return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
        return load_and_flush(virt, NULL, bytes);
}
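
/*
 * Typical call sequence (an illustrative sketch; host_buf and len are
 * hypothetical caller-supplied values, not part of this file):
 *
 *	ia_css_ptr p = hmm_alloc(len, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);
 *	if (p) {
 *		hmm_store(p, host_buf, len);
 *		hmm_load(p, host_buf, len);
 *		hmm_free(p);
 *	}
 */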

/* Write function in ISP memory management */
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
        struct hmm_buffer_object *bo;
        unsigned int idx, offset, len;
        char *src, *des;
        int ret;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        ret = hmm_check_bo(bo, virt);
        if (ret)
                return ret;

        if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
                void *dst = bo->vmap_addr;

                dst += (virt - bo->start);
                memcpy(dst, data, bytes);
                if (bo->status & HMM_BO_VMAPED_CACHED)
                        clflush_cache_range(dst, bytes);
        } else {
                void *vptr;

                vptr = hmm_bo_vmap(bo, true);
                if (vptr) {
                        vptr = vptr + (virt - bo->start);

                        memcpy(vptr, data, bytes);
                        clflush_cache_range(vptr, bytes);
                        hmm_bo_vunmap(bo);
                        return 0;
                }
        }

        src = (char *)data;
        while (bytes) {
                idx = (virt - bo->start) >> PAGE_SHIFT;
                offset = (virt - bo->start) - (idx << PAGE_SHIFT);

                if (in_atomic())
                        des = (char *)kmap_atomic(bo->page_obj[idx].page);
                else
                        des = (char *)kmap(bo->page_obj[idx].page);

                if (!des) {
                        dev_err(atomisp_dev,
                                "kmap buffer object page failed: pg_idx = %d\n",
                                idx);
                        return -EINVAL;
                }

                des += offset;

                if ((bytes + offset) >= PAGE_SIZE) {
                        len = PAGE_SIZE - offset;
                        bytes -= len;
                } else {
                        len = bytes;
                        bytes = 0;
                }

                virt += len;

                memcpy(des, src, len);

                src += len;

                clflush_cache_range(des, len);

                if (in_atomic())
                        /*
                         * Note: kunmap_atomic requires return addr from
                         * kmap_atomic, not the page. See linux/highmem.h
                         */
                        kunmap_atomic(des - offset);
                else
                        kunmap(bo->page_obj[idx].page);
        }

        return 0;
}

/* memset function in ISP memory management */
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
        struct hmm_buffer_object *bo;
        unsigned int idx, offset, len;
        char *des;
        int ret;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        ret = hmm_check_bo(bo, virt);
        if (ret)
                return ret;

        if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
                void *dst = bo->vmap_addr;

                dst += (virt - bo->start);
                memset(dst, c, bytes);

                if (bo->status & HMM_BO_VMAPED_CACHED)
                        clflush_cache_range(dst, bytes);
        } else {
                void *vptr;

                vptr = hmm_bo_vmap(bo, true);
                if (vptr) {
                        vptr = vptr + (virt - bo->start);
                        memset(vptr, c, bytes);
                        clflush_cache_range(vptr, bytes);
                        hmm_bo_vunmap(bo);
                        return 0;
                }
        }

        while (bytes) {
                idx = (virt - bo->start) >> PAGE_SHIFT;
                offset = (virt - bo->start) - (idx << PAGE_SHIFT);

                des = (char *)kmap(bo->page_obj[idx].page) + offset;

                if ((bytes + offset) >= PAGE_SIZE) {
                        len = PAGE_SIZE - offset;
                        bytes -= len;
                } else {
                        len = bytes;
                        bytes = 0;
                }

                virt += len;

                memset(des, c, len);

                clflush_cache_range(des, len);

                kunmap(bo->page_obj[idx].page);
        }

        return 0;
}

/* Convert an ISP virtual address into a physical address */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
        unsigned int idx, offset;
        struct hmm_buffer_object *bo;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        if (!bo) {
                dev_err(atomisp_dev,
                        "cannot find buffer object containing address 0x%x\n",
                        virt);
                return -1;
        }

        idx = (virt - bo->start) >> PAGE_SHIFT;
        offset = (virt - bo->start) - (idx << PAGE_SHIFT);

        return page_to_phys(bo->page_obj[idx].page) + offset;
}

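/* Map the ISP buffer starting at @virt into a user-space VMA. */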
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
        struct hmm_buffer_object *bo;

        bo = hmm_bo_device_search_start(&bo_device, virt);
        if (!bo) {
                dev_err(atomisp_dev,
                        "cannot find buffer object starting with address 0x%x\n",
                        virt);
                return -EINVAL;
        }

        return hmm_bo_mmap(vma, bo);
}

/* Map an ISP virtual address into an IA (kernel) virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
        struct hmm_buffer_object *bo;
        void *ptr;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        if (!bo) {
                dev_err(atomisp_dev,
                        "cannot find buffer object containing address 0x%x\n",
                        virt);
                return NULL;
        }

        ptr = hmm_bo_vmap(bo, cached);
        if (ptr)
                return ptr + (virt - bo->start);

        return NULL;
}

/* Flush memory that was mapped as cached memory through hmm_vmap */
void hmm_flush_vmap(ia_css_ptr virt)
{
        struct hmm_buffer_object *bo;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        if (!bo) {
                dev_warn(atomisp_dev,
                         "cannot find buffer object containing address 0x%x\n",
                         virt);
                return;
        }

        hmm_bo_flush_vmap(bo);
}

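/* Undo a kernel mapping set up through hmm_vmap(). */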
void hmm_vunmap(ia_css_ptr virt)
{
        struct hmm_buffer_object *bo;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        if (!bo) {
                dev_warn(atomisp_dev,
                         "cannot find buffer object containing address 0x%x\n",
                         virt);
                return;
        }

        hmm_bo_vunmap(bo);
}

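/* Initialize the page pool of the given type with room for @pool_size pages. */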
int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type)
{
        switch (pool_type) {
        case HMM_POOL_TYPE_RESERVED:
                reserved_pool.pops = &reserved_pops;
                return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
                                                     pool_size);
        case HMM_POOL_TYPE_DYNAMIC:
                dynamic_pool.pops = &dynamic_pops;
                return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
                                                    pool_size);
        default:
                dev_err(atomisp_dev, "invalid pool type.\n");
                return -EINVAL;
        }
}

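/* Tear down a page pool previously set up by hmm_pool_register(). */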
void hmm_pool_unregister(enum hmm_pool_type pool_type)
{
        switch (pool_type) {
        case HMM_POOL_TYPE_RESERVED:
                if (reserved_pool.pops && reserved_pool.pops->pool_exit)
                        reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
                break;
        case HMM_POOL_TYPE_DYNAMIC:
                if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
                        dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
                break;
        default:
                dev_err(atomisp_dev, "invalid pool type.\n");
                break;
        }
}

void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
{
        return hmm_vmap(ptr, cached);
        /* vunmap will be done in hmm_bo_release() */
}

ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
{
        struct hmm_buffer_object *bo;

        bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
        if (bo)
                return bo->start;

        dev_err(atomisp_dev,
                "cannot find buffer object whose kernel virtual address is %p\n",
                ptr);
        return 0;
}

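/* Dump the global hmm memory statistics to the trace buffer. */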
void hmm_show_mem_stat(const char *func, const int line)
{
        trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d  dyc_thr=%d dyc_size=%d.\n",
                     hmm_mem_stat.tol_cnt,
                     hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
                     hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
                     hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
}

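/* Reset the statistics; a disabled pool has its fields set to -1. */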
void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
{
        hmm_mem_stat.res_size = res_pgnr;
        /* If the reserved mem pool is not enabled, set its "mem stat" values to -1. */
        if (hmm_mem_stat.res_size == 0) {
                hmm_mem_stat.res_size = -1;
                hmm_mem_stat.res_cnt = -1;
        }

        /* If the dynamic memory pool is not enabled, set its "mem stat" values to -1. */
        if (!dyc_en) {
                hmm_mem_stat.dyc_size = -1;
                hmm_mem_stat.dyc_thr = -1;
        } else {
                hmm_mem_stat.dyc_size = 0;
                hmm_mem_stat.dyc_thr = dyc_pgnr;
        }
        hmm_mem_stat.usr_size = 0;
        hmm_mem_stat.sys_size = 0;
        hmm_mem_stat.tol_cnt = 0;
}