/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_IN].data_len; protection size
 *  is stored in task->prot[ISER_DIR_IN].data_len.
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_mem_reg *mem_reg;
        int err;
        struct iser_ctrl *hdr = &iser_task->desc.iser_header;

        err = iser_dma_map_task_data(iser_task,
                                     ISER_DIR_IN,
                                     DMA_FROM_DEVICE);
        if (err)
                return err;

        err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
        if (err) {
                iser_err("Failed to set up Data-IN RDMA\n");
                goto out_err;
        }
        mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

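        /*
         * Advertise the read STag/VA in the iSER header so the target can
         * RDMA-Write the Data-IN payload directly into the mapped buffer.
         */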
        hdr->flags    |= ISER_RSV;
        hdr->read_stag = cpu_to_be32(mem_reg->rkey);
        hdr->read_va   = cpu_to_be64(mem_reg->sge.addr);

        iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
                 task->itt, mem_reg->rkey,
                 (unsigned long long)mem_reg->sge.addr);

        return 0;

out_err:
        iser_dma_unmap_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
        return err;
}

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_OUT].data_len; protection size
 *  is stored in task->prot[ISER_DIR_OUT].data_len.
 */
static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
                                  unsigned int unsol_sz, unsigned int edtl)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_mem_reg *mem_reg;
        int err;
        struct iser_ctrl *hdr = &iser_task->desc.iser_header;
        struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
        struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

        err = iser_dma_map_task_data(iser_task,
                                     ISER_DIR_OUT,
                                     DMA_TO_DEVICE);
        if (err)
                return err;

        err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
                                   buf_out->data_len == imm_sz);
        if (err) {
                iser_err("Failed to register write cmd RDMA mem\n");
                goto out_err;
        }

        mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

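        /*
         * If the expected transfer length exceeds the unsolicited part,
         * advertise the write STag/VA so the target can RDMA-Read the
         * solicited remainder of the Data-OUT payload.
         */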
        if (unsol_sz < edtl) {
                hdr->flags     |= ISER_WSV;
                if (buf_out->data_len > imm_sz) {
                        hdr->write_stag = cpu_to_be32(mem_reg->rkey);
                        hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
                }

                iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
                         task->itt, mem_reg->rkey,
                         (unsigned long long)mem_reg->sge.addr, unsol_sz);
        }

        if (imm_sz > 0) {
                iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
                         task->itt, imm_sz);
                tx_dsg->addr = mem_reg->sge.addr;
                tx_dsg->length = imm_sz;
                tx_dsg->lkey = mem_reg->sge.lkey;
                iser_task->desc.num_sge = 2;
        }

        return 0;

out_err:
        iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
        return err;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *iser_conn,
                struct iser_tx_desc *tx_desc, enum iser_desc_type type,
                void (*done)(struct ib_cq *cq, struct ib_wc *wc))
{
        struct iser_device *device = iser_conn->ib_conn.device;

        tx_desc->type = type;
        tx_desc->cqe.done = done;

        ib_dma_sync_single_for_cpu(device->ib_device,
                tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
        tx_desc->iser_header.flags = ISER_VER;
        tx_desc->num_sge = 1;
}

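/* Unmap and free the login request/response buffers; safe to call more than once */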
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
        struct iser_device *device = iser_conn->ib_conn.device;
        struct iser_login_desc *desc = &iser_conn->login_desc;

        if (!desc->req)
                return;

        ib_dma_unmap_single(device->ib_device, desc->req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

        ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
                            ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

        kfree(desc->req);
        kfree(desc->rsp);

        /* make sure we never redo any unmapping */
        desc->req = NULL;
        desc->rsp = NULL;
}

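/* Allocate and DMA-map the login request/response buffers */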
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
        struct iser_device *device = iser_conn->ib_conn.device;
        struct iser_login_desc *desc = &iser_conn->login_desc;

        desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
        if (!desc->req)
                return -ENOMEM;

        desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
                                          ISCSI_DEF_MAX_RECV_SEG_LEN,
                                          DMA_TO_DEVICE);
        if (ib_dma_mapping_error(device->ib_device,
                                desc->req_dma))
                goto free_req;

        desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!desc->rsp)
                goto unmap_req;

        desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
                                          ISER_RX_LOGIN_SIZE,
                                          DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(device->ib_device,
                                desc->rsp_dma))
                goto free_rsp;

        return 0;

free_rsp:
        kfree(desc->rsp);
unmap_req:
        ib_dma_unmap_single(device->ib_device, desc->req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN,
                            DMA_TO_DEVICE);
free_req:
        kfree(desc->req);

        return -ENOMEM;
}

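/**
 * iser_alloc_rx_descriptors - allocate and DMA-map the connection RX descriptors
 * @iser_conn: iSER connection context
 * @session: iscsi session
 */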
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
                              struct iscsi_session *session)
{
        int i, j;
        u64 dma_addr;
        struct iser_rx_desc *rx_desc;
        struct ib_sge       *rx_sg;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        iser_conn->qp_max_recv_dtos = session->cmds_max;

        if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
                                    iser_conn->pages_per_mr))
                goto create_rdma_reg_res_failed;

        if (iser_alloc_login_buf(iser_conn))
                goto alloc_login_buf_fail;

        iser_conn->num_rx_descs = session->cmds_max;
        iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
                                            sizeof(struct iser_rx_desc),
                                            GFP_KERNEL);
        if (!iser_conn->rx_descs)
                goto rx_desc_alloc_fail;

        rx_desc = iser_conn->rx_descs;

        for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
                dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(device->ib_device, dma_addr))
                        goto rx_desc_dma_map_failed;

                rx_desc->dma_addr = dma_addr;
                rx_desc->cqe.done = iser_task_rsp;
                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = device->pd->local_dma_lkey;
        }

        return 0;

rx_desc_dma_map_failed:
        rx_desc = iser_conn->rx_descs;
        for (j = 0; j < i; j++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(iser_conn->rx_descs);
        iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
        iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
        iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
        iser_err("failed allocating rx descriptors / data buffers\n");
        return -ENOMEM;
}

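/**
 * iser_free_rx_descriptors - unmap and free the connection RX descriptors
 * @iser_conn: iSER connection context
 */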
void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
        int i;
        struct iser_rx_desc *rx_desc;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        iser_free_fastreg_pool(ib_conn);

        rx_desc = iser_conn->rx_descs;
        for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(iser_conn->rx_descs);
        /* make sure we never redo any unmapping */
        iser_conn->rx_descs = NULL;

        iser_free_login_buf(iser_conn);
}

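/* Post the initial batch of RX buffers once the connection enters full feature phase */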
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_session *session = conn->session;
        int err = 0;
        int i;

        iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
        /* check if this is the last login - going to full feature phase */
        if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
                goto out;

        if (session->discovery_sess) {
                iser_info("Discovery session, re-using login RX buffer\n");
                goto out;
        }

        iser_info("Normal session, posting batch of RX %d buffers\n",
                  iser_conn->qp_max_recv_dtos - 1);

        /*
         * Initial post receive buffers.
         * There is one already posted recv buffer (for the last login
         * response). Therefore, the first recv buffer is skipped here.
         */
        for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
                err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
                if (err)
                        goto out;
        }
out:
        return err;
}

/**
 * iser_send_command - send command PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 */
int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        unsigned long edtl;
        int err;
        struct iser_data_buf *data_buf, *prot_buf;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
        struct scsi_cmnd *sc = task->sc;
        struct iser_tx_desc *tx_desc = &iser_task->desc;

        edtl = ntohl(hdr->data_length);

        /* build the tx desc regd header and add it to the tx desc dto */
        iser_create_send_desc(iser_conn, tx_desc, ISCSI_TX_SCSI_COMMAND,
                              iser_cmd_comp);

        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
                data_buf = &iser_task->data[ISER_DIR_IN];
                prot_buf = &iser_task->prot[ISER_DIR_IN];
        } else {
                data_buf = &iser_task->data[ISER_DIR_OUT];
                prot_buf = &iser_task->prot[ISER_DIR_OUT];
        }

        if (scsi_sg_count(sc)) { /* using a scatter list */
                data_buf->sg = scsi_sglist(sc);
                data_buf->size = scsi_sg_count(sc);
        }
        data_buf->data_len = scsi_bufflen(sc);

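        /* T10-PI: 8 bytes of protection information per logical block */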
        if (scsi_prot_sg_count(sc)) {
                prot_buf->sg = scsi_prot_sglist(sc);
                prot_buf->size = scsi_prot_sg_count(sc);
                prot_buf->data_len = (data_buf->data_len >>
                                     ilog2(sc->device->sector_size)) * 8;
        }

        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
                err = iser_prepare_read_cmd(task);
                if (err)
                        goto send_command_error;
        }
        if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
                err = iser_prepare_write_cmd(task,
                                             task->imm_count,
                                             task->imm_count +
                                             task->unsol_r2t.data_length,
                                             edtl);
                if (err)
                        goto send_command_error;
        }

        iser_task->status = ISER_TASK_STATUS_STARTED;

        err = iser_post_send(&iser_conn->ib_conn, tx_desc);
        if (!err)
                return 0;

send_command_error:
        iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
        return err;
}

/**
 * iser_send_data_out - send data out PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 * @hdr: pointer to the LLD's iSCSI message header
 */
int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
                       struct iscsi_data *hdr)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_tx_desc *tx_desc;
        struct iser_mem_reg *mem_reg;
        unsigned long buf_offset;
        unsigned long data_seg_len;
        uint32_t itt;
        int err;
        struct ib_sge *tx_dsg;

        itt = (__force uint32_t)hdr->itt;
        data_seg_len = ntoh24(hdr->dlength);
        buf_offset   = ntohl(hdr->offset);

        iser_dbg("%s itt %d dseg_len %d offset %d\n",
                 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

        tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
        if (!tx_desc)
                return -ENOMEM;

        tx_desc->type = ISCSI_TX_DATAOUT;
        tx_desc->cqe.done = iser_dataout_comp;
        tx_desc->iser_header.flags = ISER_VER;
        memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

        /* build the tx desc */
        err = iser_initialize_task_headers(task, tx_desc);
        if (err)
                goto send_data_out_error;

        mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
        tx_dsg = &tx_desc->tx_sg[1];
        tx_dsg->addr = mem_reg->sge.addr + buf_offset;
        tx_dsg->length = data_seg_len;
        tx_dsg->lkey = mem_reg->sge.lkey;
        tx_desc->num_sge = 2;

        if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
                         buf_offset, data_seg_len,
                         iser_task->data[ISER_DIR_OUT].data_len, itt);
                err = -EINVAL;
                goto send_data_out_error;
        }
        iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
                 itt, buf_offset, data_seg_len);

        err = iser_post_send(&iser_conn->ib_conn, tx_desc);
        if (!err)
                return 0;

send_data_out_error:
        kmem_cache_free(ig.desc_cache, tx_desc);
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

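/**
 * iser_send_control - send control PDU
 * @conn: link to matching iscsi connection
 * @task: iscsi task (login or management)
 */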
int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_tx_desc *mdesc = &iser_task->desc;
        unsigned long data_seg_len;
        int err = 0;
        struct iser_device *device;

        /* build the tx desc regd header and add it to the tx desc dto */
        iser_create_send_desc(iser_conn, mdesc, ISCSI_TX_CONTROL,
                              iser_ctrl_comp);

        device = iser_conn->ib_conn.device;

        data_seg_len = ntoh24(task->hdr->dlength);

        if (data_seg_len > 0) {
                struct iser_login_desc *desc = &iser_conn->login_desc;
                struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

                if (task != conn->login_task) {
                        iser_err("data present on non login task!!!\n");
                        goto send_control_error;
                }

                ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
                                           task->data_count, DMA_TO_DEVICE);

                memcpy(desc->req, task->data, task->data_count);

                ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
                                              task->data_count, DMA_TO_DEVICE);

                tx_dsg->addr = desc->req_dma;
                tx_dsg->length = task->data_count;
                tx_dsg->lkey = device->pd->local_dma_lkey;
                mdesc->num_sge = 2;
        }

        if (task == conn->login_task) {
                iser_dbg("op %x dsl %lx, posting login rx buffer\n",
                         task->hdr->opcode, data_seg_len);
                err = iser_post_recvl(iser_conn);
                if (err)
                        goto send_control_error;
                err = iser_post_rx_bufs(conn, task->hdr);
                if (err)
                        goto send_control_error;
        }

        err = iser_post_send(&iser_conn->ib_conn, mdesc);
        if (!err)
                return 0;

send_control_error:
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

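/* RX completion handler for the login response */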
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_conn *ib_conn = wc->qp->qp_context;
        struct iser_conn *iser_conn = to_iser_conn(ib_conn);
        struct iser_login_desc *desc = iser_login(wc->wr_cqe);
        struct iscsi_hdr *hdr;
        char *data;
        int length;
        bool full_feature_phase;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                iser_err_comp(wc, "login_rsp");
                return;
        }

        ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
                                   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
                                   DMA_FROM_DEVICE);

        hdr = desc->rsp + sizeof(struct iser_ctrl);
        data = desc->rsp + ISER_HEADERS_LEN;
        length = wc->byte_len - ISER_HEADERS_LEN;
        full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
                              ISCSI_FULL_FEATURE_PHASE) &&
                             (hdr->flags & ISCSI_FLAG_CMD_FINAL);

        iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
                 hdr->itt, length);

        iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

        ib_dma_sync_single_for_device(ib_conn->device->ib_device,
                                      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
                                      DMA_FROM_DEVICE);

        if (!full_feature_phase ||
            iser_conn->iscsi_conn->session->discovery_sess)
                return;

        /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
        iser_post_recvm(iser_conn, iser_conn->rx_descs);
}

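/*
 * Verify that a remote invalidation matches the rkey registered for the
 * descriptor, and mark the MR as no longer needing local invalidation.
 */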
static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
        if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
                     (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
                iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
                return -EINVAL;
        }

        if (desc->sig_protected)
                desc->rsc.sig_mr->need_inval = false;
        else
                desc->rsc.mr->need_inval = false;

        return 0;
}

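/* Absorb a remote invalidation carried by a receive completion, if any */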
static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
                                 struct iscsi_hdr *hdr)
{
        if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
                struct iscsi_task *task;
                u32 rkey = wc->ex.invalidate_rkey;

                iser_dbg("conn %p: remote invalidation for rkey %#x\n",
                         iser_conn, rkey);

                if (unlikely(!iser_conn->snd_w_inv)) {
                        iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
                                 iser_conn);
                        return -EPROTO;
                }

                task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
                if (likely(task)) {
                        struct iscsi_iser_task *iser_task = task->dd_data;
                        struct iser_fr_desc *desc;

                        if (iser_task->dir[ISER_DIR_IN]) {
                                desc = iser_task->rdma_reg[ISER_DIR_IN].desc;
                                if (unlikely(iser_inv_desc(desc, rkey)))
                                        return -EINVAL;
                        }

                        if (iser_task->dir[ISER_DIR_OUT]) {
                                desc = iser_task->rdma_reg[ISER_DIR_OUT].desc;
                                if (unlikely(iser_inv_desc(desc, rkey)))
                                        return -EINVAL;
                        }
                } else {
                        iser_err("failed to get task for itt=%d\n", hdr->itt);
                        return -EINVAL;
                }
        }

        return 0;
}

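/* RX completion handler for task (command) responses */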
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_conn *ib_conn = wc->qp->qp_context;
        struct iser_conn *iser_conn = to_iser_conn(ib_conn);
        struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
        struct iscsi_hdr *hdr;
        int length, err;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                iser_err_comp(wc, "task_rsp");
                return;
        }

        ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
                                   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
                                   DMA_FROM_DEVICE);

        hdr = &desc->iscsi_header;
        length = wc->byte_len - ISER_HEADERS_LEN;

        iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
                 hdr->itt, length);

        if (iser_check_remote_inv(iser_conn, wc, hdr)) {
                iscsi_conn_failure(iser_conn->iscsi_conn,
                                   ISCSI_ERR_CONN_FAILED);
                return;
        }

        iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

        ib_dma_sync_single_for_device(ib_conn->device->ib_device,
                                      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
                                      DMA_FROM_DEVICE);

        err = iser_post_recvm(iser_conn, desc);
        if (err)
                iser_err("posting rx buffer err %d\n", err);
}

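/* TX completion for SCSI command PDUs; only errors need handling */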
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
        if (unlikely(wc->status != IB_WC_SUCCESS))
                iser_err_comp(wc, "command");
}

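/* TX completion for control PDUs; release the task of PDUs sent with RESERVED_ITT */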
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
        struct iscsi_task *task;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                iser_err_comp(wc, "control");
                return;
        }

        /* this arithmetic is legal by libiscsi dd_data allocation */
        task = (void *)desc - sizeof(struct iscsi_task);
        if (task->hdr->itt == RESERVED_ITT)
                iscsi_put_task(task);
}

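/* TX completion for Data-Out PDUs: unmap the descriptor header and free it */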
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
        struct ib_conn *ib_conn = wc->qp->qp_context;
        struct iser_device *device = ib_conn->device;

        if (unlikely(wc->status != IB_WC_SUCCESS))
                iser_err_comp(wc, "dataout");

        ib_dma_unmap_single(device->ib_device, desc->dma_addr,
                            ISER_HEADERS_LEN, DMA_TO_DEVICE);
        kmem_cache_free(ig.desc_cache, desc);
}

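/* Reset per-task RDMA state before the command is submitted */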
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
        iser_task->status = ISER_TASK_STATUS_INIT;

        iser_task->dir[ISER_DIR_IN] = 0;
        iser_task->dir[ISER_DIR_OUT] = 0;

        iser_task->data[ISER_DIR_IN].data_len  = 0;
        iser_task->data[ISER_DIR_OUT].data_len = 0;

        iser_task->prot[ISER_DIR_IN].data_len  = 0;
        iser_task->prot[ISER_DIR_OUT].data_len = 0;

        iser_task->prot[ISER_DIR_IN].dma_nents = 0;
        iser_task->prot[ISER_DIR_OUT].dma_nents = 0;

        memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
               sizeof(struct iser_mem_reg));
        memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
               sizeof(struct iser_mem_reg));
}

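/* Release memory registration and DMA mappings when the task completes */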
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
        if (iser_task->dir[ISER_DIR_IN]) {
                iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
                iser_dma_unmap_task_data(iser_task, ISER_DIR_IN,
                                         DMA_FROM_DEVICE);
        }

        if (iser_task->dir[ISER_DIR_OUT]) {
                iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
                iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT,
                                         DMA_TO_DEVICE);
        }
}