/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"

/*
 * lfsr (linear feedback shift register) with period 255
 */
static u8 rxe_get_key(void)
{
	static u32 key = 1;

	key = key << 1;

	key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
		^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));

	key &= 0xff;

	return key;
}
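
/* Illustrative only: the feedback taps above read bits 8, 7, 6 and 4 of
 * the shifted value, so from the seed 1 successive calls return
 * 2, 4, 8, 17, 34, 69, 139, ...; given the stated period of 255, the low
 * byte visits every non-zero value before the sequence repeats.
 */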

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
{
	switch (mem->type) {
	case RXE_MEM_TYPE_DMA:
		return 0;

	case RXE_MEM_TYPE_MR:
	case RXE_MEM_TYPE_FMR:
		if (iova < mem->iova ||
		    length > mem->length ||
		    iova > mem->iova + mem->length - length)
			return -EFAULT;
		return 0;

	default:
		return -EFAULT;
	}
}
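
/* Worked example (illustrative): for an MR with iova = 0x10000 and
 * length = 0x2000, an access at iova = 0x11800 with length = 0x1000 is
 * rejected by the third test (0x11800 > 0x10000 + 0x2000 - 0x1000)
 * because it would run 0x800 bytes past the end of the region.
 */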

#define IB_ACCESS_REMOTE	(IB_ACCESS_REMOTE_READ		\
				 | IB_ACCESS_REMOTE_WRITE	\
				 | IB_ACCESS_REMOTE_ATOMIC)

static void rxe_mem_init(int access, struct rxe_mem *mem)
{
	u32 lkey = mem->pelem.index << 8 | rxe_get_key();
	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;

	if (mem->pelem.pool->type == RXE_TYPE_MR) {
		mem->ibmr.lkey = lkey;
		mem->ibmr.rkey = rkey;
	}

	mem->lkey = lkey;
	mem->rkey = rkey;
	mem->state = RXE_MEM_STATE_INVALID;
	mem->type = RXE_MEM_TYPE_NONE;
	mem->map_shift = ilog2(RXE_BUF_PER_MAP);
}
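
/* Example (illustrative): a pool index of 5 and an lfsr key of 0x17 pack
 * into lkey = (5 << 8) | 0x17 = 0x517; rkey is the same value when any
 * IB_ACCESS_REMOTE_* bit is set in access and 0 otherwise, so a purely
 * local MR never exposes a usable rkey.
 */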

void rxe_mem_cleanup(void *arg)
{
	struct rxe_mem *mem = arg;
	int i;

	if (mem->umem)
		ib_umem_release(mem->umem);

	if (mem->map) {
		for (i = 0; i < mem->num_map; i++)
			kfree(mem->map[i]);

		kfree(mem->map);
	}
}

static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
{
	int i;
	int num_map;
	struct rxe_map **map = mem->map;

	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;

	mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
	if (!mem->map)
		goto err1;

	for (i = 0; i < num_map; i++) {
		mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
		if (!mem->map[i])
			goto err2;
	}

	WARN_ON(!is_power_of_2(RXE_BUF_PER_MAP));

	mem->map_shift = ilog2(RXE_BUF_PER_MAP);
	mem->map_mask = RXE_BUF_PER_MAP - 1;

	mem->num_buf = num_buf;
	mem->num_map = num_map;
	mem->max_buf = num_map * RXE_BUF_PER_MAP;

	return 0;

err2:
	for (i--; i >= 0; i--)
		kfree(mem->map[i]);

	kfree(mem->map);
err1:
	return -ENOMEM;
}
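
/* Example (illustrative, assuming RXE_BUF_PER_MAP == 256): num_buf = 1000
 * rounds up to num_map = 4 maps, giving max_buf = 1024 buffer slots with
 * 24 slots to spare.
 */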

int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
		     int access, struct rxe_mem *mem)
{
	rxe_mem_init(access, mem);

	mem->pd = pd;
	mem->access = access;
	mem->state = RXE_MEM_STATE_VALID;
	mem->type = RXE_MEM_TYPE_DMA;

	return 0;
}

int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mem)
{
	int entry;
	struct rxe_map **map;
	struct rxe_phys_buf *buf = NULL;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int num_buf;
	void *vaddr;
	int err;

	umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
	if (IS_ERR(umem)) {
		pr_warn("err %d from ib_umem_get\n",
			(int)PTR_ERR(umem));
		err = -EINVAL;
		goto err1;
	}

	mem->umem = umem;
	num_buf = umem->nmap;

	rxe_mem_init(access, mem);

	err = rxe_mem_alloc(rxe, mem, num_buf);
	if (err) {
		pr_warn("err %d from rxe_mem_alloc\n", err);
		ib_umem_release(umem);
		goto err1;
	}

	WARN_ON(!is_power_of_2(umem->page_size));

	mem->page_shift = ilog2(umem->page_size);
	mem->page_mask = umem->page_size - 1;

	num_buf = 0;
	map = mem->map;
	if (length > 0) {
		buf = map[0]->buf;

		for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
			vaddr = page_address(sg_page(sg));
			if (!vaddr) {
				pr_warn("null vaddr\n");
				ib_umem_release(umem);
				err = -ENOMEM;
				goto err1;
			}

			buf->addr = (uintptr_t)vaddr;
			buf->size = umem->page_size;
			num_buf++;
			buf++;

			if (num_buf >= RXE_BUF_PER_MAP) {
				map++;
				buf = map[0]->buf;
				num_buf = 0;
			}
		}
	}

	mem->pd = pd;
	mem->access = access;
	mem->length = length;
	mem->iova = iova;
	mem->va = start;
	mem->offset = ib_umem_offset(umem);
	mem->state = RXE_MEM_STATE_VALID;
	mem->type = RXE_MEM_TYPE_MR;

	return 0;

err1:
	return err;
}
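
/* Illustrative: registering 1 MiB of user memory backed by 4 KiB pages
 * yields 256 rxe_phys_buf entries, i.e. exactly one full map when
 * RXE_BUF_PER_MAP == 256; one more page would spill into a second map.
 */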

int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem)
{
	int err;

	rxe_mem_init(0, mem);

	/* In fastreg, we also set the rkey */
	mem->ibmr.rkey = mem->ibmr.lkey;

	err = rxe_mem_alloc(rxe, mem, max_pages);
	if (err)
		goto err1;

	mem->pd = pd;
	mem->max_buf = max_pages;
	mem->state = RXE_MEM_STATE_FREE;
	mem->type = RXE_MEM_TYPE_MR;

	return 0;

err1:
	return err;
}

static void lookup_iova(
	struct rxe_mem *mem,
	u64 iova,
	int *m_out,
	int *n_out,
	size_t *offset_out)
{
	size_t offset = iova - mem->iova + mem->offset;
	int map_index;
	int buf_index;
	u64 length;

	if (likely(mem->page_shift)) {
		/* fixed page size: compute map/buffer indices directly */
		*offset_out = offset & mem->page_mask;
		offset >>= mem->page_shift;
		*n_out = offset & mem->map_mask;
		*m_out = offset >> mem->map_shift;
	} else {
		/* variable buffer sizes: walk the buffers linearly */
		map_index = 0;
		buf_index = 0;

		length = mem->map[map_index]->buf[buf_index].size;

		while (offset >= length) {
			offset -= length;
			buf_index++;

			if (buf_index == RXE_BUF_PER_MAP) {
				map_index++;
				buf_index = 0;
			}
			length = mem->map[map_index]->buf[buf_index].size;
		}

		*m_out = map_index;
		*n_out = buf_index;
		*offset_out = offset;
	}
}
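
/* Worked example for the fast path (illustrative, assuming 4 KiB pages
 * and RXE_BUF_PER_MAP == 256): page_shift = 12 and map_shift = 8, so an
 * adjusted offset of 0x100123 gives *offset_out = 0x123; the remaining
 * 0x100123 >> 12 = 0x100 splits into *n_out = 0x100 & 0xff = 0 and
 * *m_out = 0x100 >> 8 = 1, i.e. byte 0x123 of buffer 0 in map 1.
 */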

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
{
	size_t offset;
	int m, n;
	void *addr;

	if (mem->state != RXE_MEM_STATE_VALID) {
		pr_warn("mem not in valid state\n");
		addr = NULL;
		goto out;
	}

	if (!mem->map) {
		addr = (void *)(uintptr_t)iova;
		goto out;
	}

	if (mem_check_range(mem, iova, length)) {
		pr_warn("range violation\n");
		addr = NULL;
		goto out;
	}

	lookup_iova(mem, iova, &m, &n, &offset);

	if (offset + length > mem->map[m]->buf[n].size) {
		pr_warn("crosses page boundary\n");
		addr = NULL;
		goto out;
	}

	addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;

out:
	return addr;
}

/* copy data from a range (vaddr, vaddr+length-1) to or from
 * a mem object starting at iova; compute the incremental crc32
 * if crcp is not NULL. The caller must hold a reference to mem.
 */
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
		 enum copy_direction dir, u32 *crcp)
{
	int err;
	int bytes;
	u8 *va;
	struct rxe_map **map;
	struct rxe_phys_buf *buf;
	int m;
	int i;
	size_t offset;
	u32 crc = crcp ? (*crcp) : 0;

	if (mem->type == RXE_MEM_TYPE_DMA) {
		u8 *src, *dest;

		src = (dir == to_mem_obj) ?
			addr : ((void *)(uintptr_t)iova);

		dest = (dir == to_mem_obj) ?
			((void *)(uintptr_t)iova) : addr;

		if (crcp)
			*crcp = crc32_le(*crcp, src, length);

		memcpy(dest, src, length);

		return 0;
	}

	WARN_ON(!mem->map);

	err = mem_check_range(mem, iova, length);
	if (err) {
		err = -EFAULT;
		goto err1;
	}

	lookup_iova(mem, iova, &m, &i, &offset);

	map = mem->map + m;
	buf = map[0]->buf + i;

	while (length > 0) {
		u8 *src, *dest;

		va = (u8 *)(uintptr_t)buf->addr + offset;
		src = (dir == to_mem_obj) ? addr : va;
		dest = (dir == to_mem_obj) ? va : addr;

		bytes = buf->size - offset;

		if (bytes > length)
			bytes = length;

		if (crcp)
			crc = crc32_le(crc, src, bytes);

		memcpy(dest, src, bytes);

		length -= bytes;
		addr += bytes;

		offset = 0;
		buf++;
		i++;

		if (i == RXE_BUF_PER_MAP) {
			i = 0;
			map++;
			buf = map[0]->buf;
		}
	}

	if (crcp)
		*crcp = crc;

	return 0;

err1:
	return err;
}
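
/* Note: crc32_le() folds each chunk into the running value, so the crc
 * accumulated across the per-buffer copies above equals one crc32_le()
 * over the whole range; callers can therefore checksum a scattered
 * payload in the same pass that copies it.
 */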

/* copy data in or out of a wqe, i.e. sg list
 * under the control of a dma descriptor
 */
int copy_data(
	struct rxe_dev *rxe,
	struct rxe_pd *pd,
	int access,
	struct rxe_dma_info *dma,
	void *addr,
	int length,
	enum copy_direction dir,
	u32 *crcp)
{
	int bytes;
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;
	struct rxe_mem *mem = NULL;
	u64 iova;
	int err;

	if (length == 0)
		return 0;

	if (length > resid) {
		err = -EINVAL;
		goto err2;
	}

	if (sge->length && (offset < sge->length)) {
		mem = lookup_mem(pd, access, sge->lkey, lookup_local);
		if (!mem) {
			err = -EINVAL;
			goto err1;
		}
	}

	while (length > 0) {
		bytes = length;

		if (offset >= sge->length) {
			/* current sge is exhausted; move to the next one */
			if (mem) {
				rxe_drop_ref(mem);
				mem = NULL;
			}

			sge++;
			dma->cur_sge++;
			offset = 0;

			if (dma->cur_sge >= dma->num_sge) {
				err = -ENOSPC;
				goto err2;
			}

			if (sge->length) {
				mem = lookup_mem(pd, access, sge->lkey,
						 lookup_local);
				if (!mem) {
					err = -EINVAL;
					goto err1;
				}
			} else {
				continue;
			}
		}

		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		if (bytes > 0) {
			iova = sge->addr + offset;

			err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
			if (err)
				goto err2;

			offset += bytes;
			resid -= bytes;
			length -= bytes;
			addr += bytes;
		}
	}

	dma->sge_offset = offset;
	dma->resid = resid;

	if (mem)
		rxe_drop_ref(mem);

	return 0;

err2:
	if (mem)
		rxe_drop_ref(mem);
err1:
	return err;
}
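
/* Illustrative: with two 0x1000-byte SGEs, copying 0x1800 bytes consumes
 * all of sge[0], advances cur_sge to 1, and leaves sge_offset = 0x800
 * with resid reduced by 0x1800, ready for the next call to resume
 * mid-SGE.
 */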

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;

	while (length) {
		unsigned int bytes;

		if (offset >= sge->length) {
			sge++;
			dma->cur_sge++;
			offset = 0;
			if (dma->cur_sge >= dma->num_sge)
				return -ENOSPC;
		}

		bytes = length;

		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		offset += bytes;
		resid -= bytes;
		length -= bytes;
	}

	dma->sge_offset = offset;
	dma->resid = resid;

	return 0;
}

/* (1) find the mem (mr or mw) corresponding to lkey/rkey
 *     depending on lookup_type
 * (2) verify that the (qp) pd matches the mem pd
 * (3) verify that the mem can support the requested access
 * (4) verify that mem state is valid
 */
struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type)
{
	struct rxe_mem *mem;
	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
	int index = key >> 8;

	if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
		mem = rxe_pool_get_index(&rxe->mr_pool, index);
		if (!mem)
			goto err1;
	} else {
		goto err1;
	}

	if ((type == lookup_local && mem->lkey != key) ||
	    (type == lookup_remote && mem->rkey != key))
		goto err2;

	if (mem->pd != pd)
		goto err2;

	if (access && !(access & mem->access))
		goto err2;

	if (mem->state != RXE_MEM_STATE_VALID)
		goto err2;

	return mem;

err2:
	rxe_drop_ref(mem);
err1:
	return NULL;
}
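
/* Example (illustrative): key 0x00000517 decodes to pool index 5; the
 * lookup succeeds only if that index lies in [RXE_MIN_MR_INDEX,
 * RXE_MAX_MR_INDEX] and the stored lkey/rkey matches all 32 bits, so a
 * stale key with the right index but a recycled low byte is rejected.
 */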

int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
		      u64 *page, int num_pages, u64 iova)
{
	int i;
	int num_buf;
	int err;
	struct rxe_map **map;
	struct rxe_phys_buf *buf;
	int page_size;

	if (num_pages > mem->max_buf) {
		err = -EINVAL;
		goto err1;
	}

	num_buf = 0;
	page_size = 1 << mem->page_shift;
	map = mem->map;
	buf = map[0]->buf;

	for (i = 0; i < num_pages; i++) {
		buf->addr = *page++;
		buf->size = page_size;
		buf++;
		num_buf++;

		if (num_buf == RXE_BUF_PER_MAP) {
			map++;
			buf = map[0]->buf;
			num_buf = 0;
		}
	}

	mem->iova = iova;
	mem->va = iova;
	mem->length = num_pages << mem->page_shift;
	mem->state = RXE_MEM_STATE_VALID;

	return 0;

err1:
	return err;
}
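
/* Usage sketch (illustrative, not part of this file): a fast-register
 * flow first allocates with rxe_mem_init_fast(), which leaves the MR in
 * RXE_MEM_STATE_FREE, then binds a page list with rxe_mem_map_pages(),
 * which sets iova/length and moves the MR to RXE_MEM_STATE_VALID so
 * that lookup_mem() will accept it.
 */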