/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
37 #define DMA_BAD_ADDER ((u64)0)
39 static int rxe_mapping_error(struct ib_device *dev, u64 dma_addr)
41 return dma_addr == DMA_BAD_ADDER;
44 static u64 rxe_dma_map_single(struct ib_device *dev,
45 void *cpu_addr, size_t size,
46 enum dma_data_direction direction)
48 WARN_ON(!valid_dma_direction(direction));
49 return (uintptr_t)cpu_addr;
52 static void rxe_dma_unmap_single(struct ib_device *dev,
53 u64 addr, size_t size,
54 enum dma_data_direction direction)
56 WARN_ON(!valid_dma_direction(direction));
59 static u64 rxe_dma_map_page(struct ib_device *dev,
62 size_t size, enum dma_data_direction direction)
66 WARN_ON(!valid_dma_direction(direction));
68 if (offset + size > PAGE_SIZE) {
73 addr = (uintptr_t)page_address(page);
81 static void rxe_dma_unmap_page(struct ib_device *dev,
82 u64 addr, size_t size,
83 enum dma_data_direction direction)
85 WARN_ON(!valid_dma_direction(direction));
88 static int rxe_map_sg(struct ib_device *dev, struct scatterlist *sgl,
89 int nents, enum dma_data_direction direction)
91 struct scatterlist *sg;
96 WARN_ON(!valid_dma_direction(direction));
98 for_each_sg(sgl, sg, nents, i) {
99 addr = (uintptr_t)page_address(sg_page(sg));
104 sg->dma_address = addr + sg->offset;
105 #ifdef CONFIG_NEED_SG_DMA_LENGTH
106 sg->dma_length = sg->length;
113 static void rxe_unmap_sg(struct ib_device *dev,
114 struct scatterlist *sg, int nents,
115 enum dma_data_direction direction)
117 WARN_ON(!valid_dma_direction(direction));
120 static int rxe_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
121 int nents, enum dma_data_direction direction,
124 return rxe_map_sg(dev, sgl, nents, direction);
127 static void rxe_unmap_sg_attrs(struct ib_device *dev,
128 struct scatterlist *sg, int nents,
129 enum dma_data_direction direction,
132 rxe_unmap_sg(dev, sg, nents, direction);
135 static void rxe_sync_single_for_cpu(struct ib_device *dev,
137 size_t size, enum dma_data_direction dir)
141 static void rxe_sync_single_for_device(struct ib_device *dev,
143 size_t size, enum dma_data_direction dir)
147 static void *rxe_dma_alloc_coherent(struct ib_device *dev, size_t size,
148 u64 *dma_handle, gfp_t flag)
153 p = alloc_pages(flag, get_order(size));
155 addr = page_address(p);
158 *dma_handle = (uintptr_t)addr;
163 static void rxe_dma_free_coherent(struct ib_device *dev, size_t size,
164 void *cpu_addr, u64 dma_handle)
166 free_pages((unsigned long)cpu_addr, get_order(size));
169 struct ib_dma_mapping_ops rxe_dma_mapping_ops = {
170 .mapping_error = rxe_mapping_error,
171 .map_single = rxe_dma_map_single,
172 .unmap_single = rxe_dma_unmap_single,
173 .map_page = rxe_dma_map_page,
174 .unmap_page = rxe_dma_unmap_page,
175 .map_sg = rxe_map_sg,
176 .unmap_sg = rxe_unmap_sg,
177 .map_sg_attrs = rxe_map_sg_attrs,
178 .unmap_sg_attrs = rxe_unmap_sg_attrs,
179 .sync_single_for_cpu = rxe_sync_single_for_cpu,
180 .sync_single_for_device = rxe_sync_single_for_device,
181 .alloc_coherent = rxe_dma_alloc_coherent,
182 .free_coherent = rxe_dma_free_coherent