/* pci-dma.c: Dynamic DMA mapping support for the FRV CPUs that have MMUs
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

/* Allocate a zeroed block of consistent (uncached) memory for DMA. */
static void *frv_dma_alloc(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	ret = consistent_alloc(gfp, size, dma_handle);
	if (ret)
		memset(ret, 0, size);

	return ret;
}

/* Return a block obtained from frv_dma_alloc() to the consistent pool. */
static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	consistent_free(vaddr);
}

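/*
 * A minimal sketch of how a driver reaches frv_dma_alloc()/frv_dma_free()
 * above: dma_alloc_coherent() and dma_free_coherent() dispatch through the
 * device's dma_map_ops.  The function name and buffer size are hypothetical;
 * this block is illustrative only and is not built.
 */
#if 0
static int example_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* allocates PAGE_SIZE bytes of consistent (uncached) memory */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device, access ring from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
#endif
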
/* Map a scatterlist for DMA by writing back the CPU's data cache for each
 * page, so that the device sees up-to-date memory contents.
 */
static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	unsigned long dampr2;
	void *vaddr;
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	/* save the primary atomic kmap window (DAMPR2) so it can be restored */
	dampr2 = __get_DAMPR(2);

	for_each_sg(sglist, sg, nents, i) {
		/* each kmap reuses the same primary window, replacing the
		 * previous mapping, so a single final unmap suffices */
		vaddr = kmap_atomic_primary(sg_page(sg));

		frv_dcache_writeback((unsigned long) vaddr,
				     (unsigned long) vaddr + PAGE_SIZE);
	}

	kunmap_atomic_primary(vaddr);
	if (dampr2) {
		__set_DAMPR(2, dampr2);
		__set_IAMPR(2, dampr2);
	}

	return nents;
}

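/*
 * A minimal sketch, illustrative only (not built), of the driver-side call
 * that ends up in frv_dma_map_sg() above: the scatterlist is mapped for
 * device access and the returned entry count is used to program the
 * hardware.  The names example_map_buffers and nr_pages are hypothetical.
 */
#if 0
static int example_map_buffers(struct device *dev, struct scatterlist *sgl,
			       int nr_pages)
{
	int count;

	/* writes back the CPU caches for every page before the device reads */
	count = dma_map_sg(dev, sgl, nr_pages, DMA_TO_DEVICE);
	if (!count)
		return -EIO;

	/* ... hand sg_dma_address()/sg_dma_len() of each entry to the device ... */

	dma_unmap_sg(dev, sgl, nr_pages, DMA_TO_DEVICE);
	return 0;
}
#endif
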
/* Map a single page for DMA: write back its data cache and return its
 * physical address as the bus address.
 */
static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	flush_dcache_page(page);
	return (dma_addr_t) page_to_phys(page) + offset;
}

/* Ensure pending CPU writes have reached memory before the device reads. */
static void frv_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	flush_write_buffers();
}

static void frv_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	flush_write_buffers();
}

static int frv_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must lie within a tighter range than
	 * GFP_DMA provides.
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}

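/*
 * Illustrative sketch (not built): dma_set_mask() consults dma_supported()
 * above, so a mask of 24 bits or more is accepted while anything narrower
 * than 0x00ffffff is rejected.  The device pointer and function name are
 * hypothetical.
 */
#if 0
static int example_set_mask(struct device *dev)
{
	/* 32-bit mask: wide enough, so frv_dma_supported() accepts it */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;

	/* 20-bit mask: tighter than GFP_DMA can guarantee, so this fails */
	WARN_ON(dma_set_mask(dev, DMA_BIT_MASK(20)) == 0);

	return 0;
}
#endif
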
struct dma_map_ops frv_dma_ops = {
	.alloc			= frv_dma_alloc,
	.free			= frv_dma_free,
	.map_page		= frv_dma_map_page,
	.map_sg			= frv_dma_map_sg,
	.sync_single_for_device	= frv_dma_sync_single_for_device,
	.sync_sg_for_device	= frv_dma_sync_sg_for_device,
	.dma_supported		= frv_dma_supported,
};
EXPORT_SYMBOL(frv_dma_ops);