#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H

#include <linux/mm.h>		/* for page_address() in dma_map_page() */
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>

/* See Documentation/DMA-API-HOWTO.txt */
struct hppa_dma_ops {
	int  (*dma_supported)(struct device *dev, u64 mask);
	void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
	void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
	void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
	dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
	void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
	int  (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
	void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
	void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
	void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
	void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
};

/*
** We could live without the hppa_dma_ops indirection if we didn't want
** to support 4 different coherent DMA models with one binary (they will
** someday be loadable modules):
**
**     I/O MMU          consistent method          dma_sync behavior
**   =============    ========================   =======================
**   a) PA-7x00LC     uncacheable host memory    flush/purge
**   b) U2/Uturn      cacheable host memory      NOP
**   c) Ike/Astro     cacheable host memory      NOP
**   d) EPIC/SAGA     memory on EPIC/SAGA        flush/reset DMA channel
**
** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU.
**
** Systems (e.g. PCX-T workstations) that don't fall into the above
** categories will need to modify the needed drivers to perform
** flush/purge and allocate "regular" cacheable pages for everything.
*/
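
/*
** A sketch of how an ops table gets installed (illustrative, not part of
** this header): platform setup code picks the table that matches the
** machine before any driver maps memory.  The function name and the
** cpu_type test below are assumptions for illustration only.
**
**	void __init example_dma_init(void)
**	{
**		if (boot_cpu_data.cpu_type == pcxl)	-- case a) above
**			hppa_dma_ops = &pcxl_dma_ops;
**		else
**			hppa_dma_ops = &pcx_dma_ops;
**	}
*/
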
#define DMA_ERROR_CODE	(~(dma_addr_t)0)

#ifdef CONFIG_PA11
extern struct hppa_dma_ops pcxl_dma_ops;	/* PA7[13]00LC: uncacheable consistent memory */
extern struct hppa_dma_ops pcx_dma_ops;		/* other non-IOMMU PA1.x machines */
#endif

extern struct hppa_dma_ops *hppa_dma_ops;	/* the ops table in use, set at boot */

/* DMA attributes are ignored on this platform */
#define dma_alloc_attrs(d, s, h, f, a) dma_alloc_coherent(d, s, h, f)
#define dma_free_attrs(d, s, h, f, a) dma_free_coherent(d, s, h, f)

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
}

static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t flag)
{
	return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
}

static inline void
dma_free_coherent(struct device *dev, size_t size,
		  void *vaddr, dma_addr_t dma_handle)
{
	hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
}

static inline void
dma_free_noncoherent(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle)
{
	/* coherent and non-coherent buffers share one free path */
	hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
}
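
/*
** Usage sketch (illustrative, not from this file): a driver allocates a
** coherent buffer for, say, a descriptor ring and frees it on teardown.
** The names "ring"/"ring_dma" and the PAGE_SIZE length are assumptions.
**
**	dma_addr_t ring_dma;
**	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
**	if (!ring)
**		return -ENOMEM;
**	... use the ring ...
**	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
*/
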
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	return hppa_dma_ops->map_single(dev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	return hppa_dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}
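
/*
** Streaming-mapping sketch (illustrative): map a driver-owned buffer for
** one transfer and unmap it when the device is done.  "buf" and "len"
** are assumptions; on this platform dma_mapping_error() is stubbed to 0
** (see below), so the check matters only for portable code.
**
**	dma_addr_t d = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
**	if (dma_mapping_error(dev, d))
**		return -ENOMEM;
**	... start the transfer and wait for completion ...
**	dma_unmap_single(dev, d, len, DMA_TO_DEVICE);
*/
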
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	return dma_map_single(dev, page_address(page) + offset, size, direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_single_for_cpu)
		hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0,
						      size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_single_for_device)
		hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0,
							 size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_single_for_cpu)
		hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset,
						      size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_single_for_device)
		hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset,
							 size, direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_sg_for_cpu)
		hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	if (hppa_dma_ops->dma_sync_sg_for_device)
		hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
}
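
/*
** Sync sketch (illustrative): a buffer mapped DMA_FROM_DEVICE has to be
** handed back to the CPU before the driver reads it, and returned to the
** device before the next transfer.  "d" and "len" are assumptions.
**
**	dma_sync_single_for_cpu(dev, d, len, DMA_FROM_DEVICE);
**	... CPU inspects the received data ...
**	dma_sync_single_for_device(dev, d, len, DMA_FROM_DEVICE);
*/
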
static inline int
dma_supported(struct device *dev, u64 mask)
{
	return hppa_dma_ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
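
/*
** Mask-negotiation sketch (illustrative): probe code asks for the widest
** mask the device can drive and bails out if the platform can't satisfy it:
**
**	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
**		return -EIO;
*/
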
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	/* flush only where dma_sync is a real operation (non-IOMMU machines) */
	if (hppa_dma_ops->dma_sync_single_for_cpu)
		flush_kernel_dcache_range((unsigned long)vaddr, size);
}
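
/*
** dma_cache_sync pairs with dma_alloc_noncoherent (illustrative sketch;
** "p", "d" and "len" are assumptions): the driver flushes explicitly
** after the CPU writes and before the device reads.
**
**	void *p = dma_alloc_noncoherent(dev, len, &d, GFP_KERNEL);
**	... CPU fills p ...
**	dma_cache_sync(dev, p, len, DMA_TO_DEVICE);
*/
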
/* Find the HBA's private data by walking up the device tree, caching it. */
static inline void *
parisc_walk_tree(struct device *dev)
{
	struct device *otherdev;

	if (likely(dev->platform_data != NULL))
		return dev->platform_data;

	/* OK, just traverse the bus to find it */
	for (otherdev = dev->parent; otherdev;
	     otherdev = otherdev->parent) {
		if (otherdev->platform_data) {
			dev->platform_data = otherdev->platform_data;
			break;
		}
	}

	return dev->platform_data;
}

#define GET_IOC(dev) ({					\
	void *__pdata = parisc_walk_tree(dev);		\
	__pdata ? HBA_DATA(__pdata)->iommu : NULL;	\
})
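
/*
** GET_IOC sketch (illustrative): IOMMU code looks up the ioc that owns a
** device and must cope with a NULL result on non-IOMMU machines:
**
**	struct ioc *ioc = GET_IOC(dev);
**	if (!ioc)
**		... fail the mapping ...
*/
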
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
struct ioc;
void *ccio_get_iommu(const struct parisc_device *dev);
int ccio_request_resource(const struct parisc_device *dev,
		struct resource *res);
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align);
#else /* !CONFIG_IOMMU_CCIO */
#define ccio_get_iommu(dev) NULL
#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res)
#define ccio_allocate_resource(dev, res, size, min, max, align) \
		allocate_resource(&iomem_resource, res, size, min, max, \
				align, NULL, NULL)
#endif /* !CONFIG_IOMMU_CCIO */

#ifdef CONFIG_IOMMU_SBA
struct parisc_device;
void *sba_get_iommu(struct parisc_device *dev);
#endif

/* At the moment, we panic on error for IOMMU resource exhaustion */
#define dma_mapping_error(dev, x)	0

/* These APIs cannot be supported on PA-RISC */
static inline int dma_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma, void *cpu_addr,
				    dma_addr_t dma_addr, size_t size)
{
	return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size)
{
	return -EINVAL;
}

#endif /* _PARISC_DMA_MAPPING_H */