/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device *ibdev;
	struct mm_struct *owning_mm;
	u64 iova;
	size_t length;
	unsigned long address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_umem_block_iter_next(biter);)
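
/*
 * Usage sketch: a driver that has already chosen a supported block size
 * (typically via ib_umem_find_best_pgsz()) walks the aligned DMA blocks and
 * records each block address in its own translation table. "pas" and
 * "mr_page_array" are hypothetical driver-owned names, not part of this API.
 *
 *	struct ib_block_iter biter;
 *	u64 *pas = mr_page_array;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		*pas++ = rdma_block_iter_dma_address(&biter);
 */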

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
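
/*
 * Typical registration flow (sketch; "hw_pgsz_bitmap" and "virt_addr" are
 * hypothetical driver-side values): pin the user range, pick a HW page size,
 * and release the umem on error or at deregistration.
 *
 *	struct ib_umem *umem;
 *	unsigned long pgsz;
 *
 *	umem = ib_umem_get(ibdev, start, length, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *	pgsz = ib_umem_find_best_pgsz(umem, hw_pgsz_bitmap, virt_addr);
 *	if (!pgsz) {
 *		ib_umem_release(umem);
 *		return -EINVAL;
 *	}
 */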

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
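
/*
 * Sketch: hardware that needs 64 byte alignment and supports offsets up to
 * 4032 bytes (mask 0xfc0, i.e. "111111000000") could pick its page size like
 * this, where "hw_pgsz_bitmap" is a hypothetical capability mask:
 *
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, hw_pgsz_bitmap, 0xfc0);
 *	if (!pgsz)
 *		return -EINVAL;
 */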

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
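
/*
 * Sketch of the pinned dma-buf path: import the buffer by file descriptor,
 * use the embedded umem to build the HW mapping, and drop the mapping and
 * attachment at deregistration. "fd", "offset" and "length" come from the
 * user's registration request.
 *
 *	struct ib_umem_dmabuf *umem_dmabuf;
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, length, fd,
 *						IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	...
 *	ib_umem_dmabuf_release(umem_dmabuf);
 */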

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */