/*
 * Support for Medifield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/list.h>
30 #include <linux/spinlock.h>
31 #include <linux/mutex.h>
32 #include "mmu/isp_mmu.h"
33 #include "hmm/hmm_common.h"
34 #include "ia_css_types.h"
/* Bail out of the calling function with `exp` when bdev is NULL. */
#define check_bodev_null_return(bdev, exp)	\
	check_null_return(bdev, exp, "NULL hmm_bo_device.\n")
/* Return (void) from the calling function when bdev is NULL. */
#define check_bodev_null_return_void(bdev)	\
	check_null_return_void(bdev, "NULL hmm_bo_device.\n")
44 #define check_bo_status_yes_goto(bo, _status, label) \
45 var_not_equal_goto((bo->status & (_status)), (_status), \
47 "HMM buffer status not contain %s.\n", \
50 #define check_bo_status_no_goto(bo, _status, label) \
51 var_equal_goto((bo->status & (_status)), (_status), \
53 "HMM buffer status contains %s.\n", \
/* Map an rb_node (bo->node) back to its enclosing hmm_buffer_object. */
#define rbtree_node_to_hmm_bo(root_node) \
	container_of((root_node), struct hmm_buffer_object, node)
/* Map a list_head (bo->list) back to its enclosing hmm_buffer_object. */
#define list_to_hmm_bo(list_ptr) \
	list_entry((list_ptr), struct hmm_buffer_object, list)
/*
 * Map an embedded kref (bo->kref) back to its enclosing hmm_buffer_object.
 *
 * Use container_of() directly: the original spelled this list_entry(),
 * which only worked because list_entry() expands to container_of() —
 * a kref is not a list head, so that spelling was misleading.
 */
#define kref_to_hmm_bo(kref_ptr) \
	container_of((kref_ptr), struct hmm_buffer_object, kref)
/* Bail out of the calling function with `exp` when bo is NULL. */
#define check_bo_null_return(bo, exp) \
	check_null_return(bo, exp, "NULL hmm buffer object.\n")
/* Return (void) from the calling function when bo is NULL. */
#define check_bo_null_return_void(bo) \
	check_null_return_void(bo, "NULL hmm buffer object.\n")
/* Page-allocation order bounds (2^order pages per allocation attempt). */
#define	HMM_MAX_ORDER		3
#define	HMM_MIN_ORDER		0

/* ISP virtual address space managed by the bo device. */
#define ISP_VM_START	0x0
#define ISP_VM_SIZE	(0x7FFFFFFF)	/* 2G address space */
#define ISP_PTR_NULL	NULL

/* hmm_bo_device flag: presumably set by hmm_bo_device_init() — confirm. */
#define	HMM_BO_DEVICE_INITED	0x1
91 HMM_PAGE_TYPE_RESERVED,
92 HMM_PAGE_TYPE_DYNAMIC,
93 HMM_PAGE_TYPE_GENERAL,
/*
 * hmm_buffer_object status bits. The vm-region, page, bind, mmap and
 * vmap states are tracked independently (see the corresponding
 * hmm_bo_* calls below).
 */
#define HMM_BO_MASK		0x1
#define HMM_BO_FREE		0x0	/* vm region not allocated */
#define HMM_BO_ALLOCED		0x1	/* vm region allocated */
#define HMM_BO_PAGE_ALLOCED	0x2	/* physical pages allocated */
#define HMM_BO_BINDED		0x4	/* bound into the virtual address space */
#define HMM_BO_MMAPED		0x8	/* mmapped to a user vma */
#define HMM_BO_VMAPED		0x10	/* vmapped to kernel VA, uncached */
#define HMM_BO_VMAPED_CACHED	0x20	/* vmapped to kernel VA, cached */
#define HMM_BO_ACTIVE		0x1000
/* bo memory backing type — NOTE(review): semantics inferred from names. */
#define HMM_BO_MEM_TYPE_USER	0x1
#define HMM_BO_MEM_TYPE_PFN	0x2
108 struct hmm_bo_device {
111 /* start/pgnr/size is used to record the virtual memory of this bo */
116 /* list lock is used to protect the entire_bo_list */
117 spinlock_t list_lock;
119 struct ion_client *iclient;
123 /* linked list for entire buffer object */
124 struct list_head entire_bo_list;
125 /* rbtree for maintain entire allocated vm */
126 struct rb_root allocated_rbtree;
127 /* rbtree for maintain entire free vm */
128 struct rb_root free_rbtree;
129 struct mutex rbtree_mutex;
130 struct kmem_cache *bo_cache;
133 struct hmm_page_object {
135 enum hmm_page_type type;
138 struct hmm_buffer_object {
139 struct hmm_bo_device *bdev;
140 struct list_head list;
143 /* mutex protecting this BO */
145 enum hmm_bo_type type;
146 struct hmm_page_object *page_obj; /* physical pages */
150 struct ion_handle *ihandle;
154 void *vmap_addr; /* kernel virtual address by vmap */
161 * When insert a bo which has the same pgnr with an existed
162 * bo node in the free_rbtree, using "prev & next" pointer
163 * to maintain a bo linked list instead of insert this bo
164 * into free_rbtree directly, it will make sure each node
165 * in free_rbtree has different pgnr.
166 * "prev & next" default is NULL.
168 struct hmm_buffer_object *prev;
169 struct hmm_buffer_object *next;
172 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
/* Release the buffer object's internal resources (see hmm_bo_unref). */
void hmm_bo_release(struct hmm_buffer_object *bo);
/*
 * Initialize a bo device managing the virtual address range
 * [vaddr_start, vaddr_start + size), backed by the given ISP MMU
 * driver. Return value: 0 on success presumed — TODO(review): confirm.
 */
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start, unsigned int size);
/*
 * clean up all hmm_bo_device related things.
 */
void hmm_bo_device_exit(struct hmm_bo_device *bdev);
/*
 * whether the bo device is inited or not.
 */
int hmm_bo_device_inited(struct hmm_bo_device *bdev);
/*
 * increase the buffer object's reference count.
 */
void hmm_bo_ref(struct hmm_buffer_object *bo);
/*
 * decrease the buffer object's reference count; when the count reaches
 * 0, the buffer object's release function is called.
 *
 * this call is also used to release the hmm_buffer_object, or an upper
 * level object with it embedded in. you need to call this function when
 * it is no longer used.
 *
 * the user does not need to care about releasing the buffer object's
 * internal resources in the release callback; that is handled
 * internally.
 *
 * this call will only release the internal resources of the buffer
 * object, but will not free the buffer object itself, as the buffer
 * object can be either pre-allocated statically or dynamically
 * allocated. so the user needs to deal with the release of the buffer
 * object itself manually. below example shows the normal case of using
 * the buffer object:
 *
 *	struct hmm_buffer_object *bo = hmm_bo_create(bdev, pgnr);
 *	...
 *	struct hmm_buffer_object bo;
 *	hmm_bo_init(bdev, &bo, pgnr, NULL);
 *	...
 */
void hmm_bo_unref(struct hmm_buffer_object *bo);
/*
 * whether the bo's vm region has been allocated (HMM_BO_ALLOCED) —
 * TODO(review): confirm; the original comment here duplicated the
 * hmm_bo_alloc_pages() description and did not match this function.
 */
int hmm_bo_allocated(struct hmm_buffer_object *bo);
/*
 * allocate/free physical pages for the bo. will try to alloc mem
 * from highmem if from_highmem is set, and type indicates whether the
 * pages are allocated by the video driver (for a shared buffer)
 * or by the ISP driver itself.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type, int from_highmem,
		       void *userptr, bool cached);
void hmm_bo_free_pages(struct hmm_buffer_object *bo);
int hmm_bo_page_allocated(struct hmm_buffer_object *bo);
/*
 * get physical page info of the bo: the page object array and the
 * number of pages.
 */
int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
			 struct hmm_page_object **page_obj, int *pgnr);
/*
 * bind/unbind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo);
void hmm_bo_unbind(struct hmm_buffer_object *bo);
int hmm_bo_binded(struct hmm_buffer_object *bo);
/*
 * vmap the buffer object's pages to a contiguous kernel virtual address.
 * if the buffer has already been vmapped, return that virtual address
 * directly.
 */
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);
/*
 * flush the cache for the vmapped buffer object's pages;
 * if the buffer has not been vmapped, return directly.
 */
void hmm_bo_flush_vmap(struct hmm_buffer_object *bo);
/*
 * vunmap the buffer object's kernel virtual address.
 */
void hmm_bo_vunmap(struct hmm_buffer_object *bo);
/*
 * mmap the bo's physical pages to a specific vma.
 *
 * the vma's address space size must be the same as the bo's size,
 * otherwise -EINVAL is returned.
 *
 * vma->vm_flags will be set to (VM_RESERVED | VM_IO).
 */
int hmm_bo_mmap(struct vm_area_struct *vma,
		struct hmm_buffer_object *bo);
/* Global page pools — presumably shared by the whole hmm layer; confirm. */
extern struct hmm_pool dynamic_pool;
extern struct hmm_pool reserved_pool;
/*
 * find the buffer object by its virtual address vaddr.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_start(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr);
/*
 * find the buffer object by a virtual address: it does not need to be
 * the start address of a bo, any address within the range of a bo
 * matches. return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_in_range(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr);
/*
 * find the buffer object with kernel virtual (vmap) address vaddr.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
    struct hmm_bo_device *bdev, const void *vaddr);