1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
4 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
7 #include <target/target_core_base.h>
8 #include <target/target_core_fabric.h>
9 #include "efct_driver.h"
13 * lio_wq is used to call the LIO backend during creation or deletion of
14 * sessions. This brings serialization to the session management as we create
15 * a single-threaded work queue.
17 static struct workqueue_struct *lio_wq;
20 efct_format_wwn(char *str, size_t len, const char *pre, u64 wwn)
24 put_unaligned_be64(wwn, a);
25 return snprintf(str, len, "%s%8phC", pre, a);
29 efct_lio_parse_wwn(const char *name, u64 *wwp, u8 npiv)
36 "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx",
37 &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
41 "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
42 &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
49 *wwp = get_unaligned_be64(b);
54 efct_lio_parse_npiv_wwn(const char *name, size_t size, u64 *wwpn, u64 *wwnn)
56 unsigned int cnt = size;
60 if (name[cnt - 1] == '\n' || name[cnt - 1] == 0)
63 /* validate we have enough characters for WWPN */
64 if ((cnt != (16 + 1 + 16)) || (name[16] != ':'))
67 rc = efct_lio_parse_wwn(&name[0], wwpn, 1);
71 rc = efct_lio_parse_wwn(&name[17], wwnn, 1);
79 efct_lio_tpg_enable_show(struct config_item *item, char *page)
81 struct se_portal_group *se_tpg = to_tpg(item);
82 struct efct_lio_tpg *tpg =
83 container_of(se_tpg, struct efct_lio_tpg, tpg);
85 return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
89 efct_lio_tpg_enable_store(struct config_item *item, const char *page,
92 struct se_portal_group *se_tpg = to_tpg(item);
93 struct efct_lio_tpg *tpg =
94 container_of(se_tpg, struct efct_lio_tpg, tpg);
99 if (!tpg->nport || !tpg->nport->efct) {
100 pr_err("%s: Unable to find EFCT device\n", __func__);
104 efct = tpg->nport->efct;
107 if (kstrtoul(page, 0, &op) < 0)
114 efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
116 ret = efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE);
118 efct->tgt_efct.lio_nport = NULL;
119 efc_log_debug(efct, "cannot bring port online\n");
122 } else if (op == 0) {
123 efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
125 if (efc->domain && efc->domain->nport)
126 efct_scsi_tgt_del_nport(efc, efc->domain->nport);
128 tpg->enabled = false;
137 efct_lio_npiv_tpg_enable_show(struct config_item *item, char *page)
139 struct se_portal_group *se_tpg = to_tpg(item);
140 struct efct_lio_tpg *tpg =
141 container_of(se_tpg, struct efct_lio_tpg, tpg);
143 return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
147 efct_lio_npiv_tpg_enable_store(struct config_item *item, const char *page,
150 struct se_portal_group *se_tpg = to_tpg(item);
151 struct efct_lio_tpg *tpg =
152 container_of(se_tpg, struct efct_lio_tpg, tpg);
153 struct efct_lio_vport *lio_vport = tpg->vport;
158 if (kstrtoul(page, 0, &op) < 0)
162 pr_err("Unable to find vport\n");
166 efct = lio_vport->efct;
171 efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
176 ret = efc_nport_vport_new(efc->domain,
177 lio_vport->npiv_wwpn,
178 lio_vport->npiv_wwnn,
179 U32_MAX, false, true,
182 efc_log_err(efct, "Failed to create Vport\n");
188 if (!(efc_vport_create_spec(efc, lio_vport->npiv_wwnn,
189 lio_vport->npiv_wwpn, U32_MAX,
190 false, true, NULL, NULL)))
193 } else if (op == 0) {
194 efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
196 tpg->enabled = false;
197 /* only physical nport should exist, free lio_nport
198 * allocated in efct_lio_make_nport
201 efc_nport_vport_del(efct->efcport, efc->domain,
202 lio_vport->npiv_wwpn,
203 lio_vport->npiv_wwnn);
212 static char *efct_lio_get_fabric_wwn(struct se_portal_group *se_tpg)
214 struct efct_lio_tpg *tpg =
215 container_of(se_tpg, struct efct_lio_tpg, tpg);
217 return tpg->nport->wwpn_str;
220 static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group *se_tpg)
222 struct efct_lio_tpg *tpg =
223 container_of(se_tpg, struct efct_lio_tpg, tpg);
225 return tpg->vport->wwpn_str;
228 static u16 efct_lio_get_tag(struct se_portal_group *se_tpg)
230 struct efct_lio_tpg *tpg =
231 container_of(se_tpg, struct efct_lio_tpg, tpg);
236 static u16 efct_lio_get_npiv_tag(struct se_portal_group *se_tpg)
238 struct efct_lio_tpg *tpg =
239 container_of(se_tpg, struct efct_lio_tpg, tpg);
244 static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg)
249 static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg)
254 static int efct_lio_check_demo_write_protect(struct se_portal_group *se_tpg)
256 struct efct_lio_tpg *tpg =
257 container_of(se_tpg, struct efct_lio_tpg, tpg);
259 return tpg->tpg_attrib.demo_mode_write_protect;
263 efct_lio_npiv_check_demo_write_protect(struct se_portal_group *se_tpg)
265 struct efct_lio_tpg *tpg =
266 container_of(se_tpg, struct efct_lio_tpg, tpg);
268 return tpg->tpg_attrib.demo_mode_write_protect;
271 static int efct_lio_check_prod_write_protect(struct se_portal_group *se_tpg)
273 struct efct_lio_tpg *tpg =
274 container_of(se_tpg, struct efct_lio_tpg, tpg);
276 return tpg->tpg_attrib.prod_mode_write_protect;
280 efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg)
282 struct efct_lio_tpg *tpg =
283 container_of(se_tpg, struct efct_lio_tpg, tpg);
285 return tpg->tpg_attrib.prod_mode_write_protect;
288 static u32 efct_lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
293 static int efct_lio_check_stop_free(struct se_cmd *se_cmd)
295 struct efct_scsi_tgt_io *ocp =
296 container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
297 struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
299 efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_CHK_STOP_FREE);
300 return target_put_sess_cmd(se_cmd);
304 efct_lio_abort_tgt_cb(struct efct_io *io,
305 enum efct_scsi_io_status scsi_status,
306 u32 flags, void *arg)
308 efct_lio_io_printf(io, "Abort done, status:%d\n", scsi_status);
313 efct_lio_aborted_task(struct se_cmd *se_cmd)
315 struct efct_scsi_tgt_io *ocp =
316 container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
317 struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
319 efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_ABORTED_TASK);
324 /* command has been aborted, cleanup here */
325 ocp->aborting = true;
326 ocp->err = EFCT_SCSI_STATUS_ABORTED;
327 /* terminate the exchange */
328 efct_scsi_tgt_abort_io(io, efct_lio_abort_tgt_cb, NULL);
331 static void efct_lio_release_cmd(struct se_cmd *se_cmd)
333 struct efct_scsi_tgt_io *ocp =
334 container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
335 struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
336 struct efct *efct = io->efct;
338 efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_RELEASE_CMD);
339 efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_CMPL_CMD);
340 efct_scsi_io_complete(io);
341 atomic_sub_return(1, &efct->tgt_efct.ios_in_use);
344 static void efct_lio_close_session(struct se_session *se_sess)
346 struct efc_node *node = se_sess->fabric_sess_ptr;
348 pr_debug("se_sess=%p node=%p", se_sess, node);
351 pr_debug("node is NULL");
355 efc_node_post_shutdown(node, NULL);
358 static u32 efct_lio_sess_get_index(struct se_session *se_sess)
363 static void efct_lio_set_default_node_attrs(struct se_node_acl *nacl)
367 static int efct_lio_get_cmd_state(struct se_cmd *cmd)
369 struct efct_scsi_tgt_io *ocp =
370 container_of(cmd, struct efct_scsi_tgt_io, cmd);
371 struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
373 return io->tgt_io.state;
377 efct_lio_sg_map(struct efct_io *io)
379 struct efct_scsi_tgt_io *ocp = &io->tgt_io;
380 struct se_cmd *cmd = &ocp->cmd;
382 ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg,
383 cmd->t_data_nents, cmd->data_direction);
384 if (ocp->seg_map_cnt == 0)
390 efct_lio_sg_unmap(struct efct_io *io)
392 struct efct_scsi_tgt_io *ocp = &io->tgt_io;
393 struct se_cmd *cmd = &ocp->cmd;
395 if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
398 dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
399 ocp->seg_map_cnt, cmd->data_direction);
400 ocp->seg_map_cnt = 0;
404 efct_lio_status_done(struct efct_io *io,
405 enum efct_scsi_io_status scsi_status,
406 u32 flags, void *arg)
408 struct efct_scsi_tgt_io *ocp = &io->tgt_io;
410 efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RSP_DONE);
411 if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
412 efct_lio_io_printf(io, "callback completed with error=%d\n",
414 ocp->err = scsi_status;
416 if (ocp->seg_map_cnt)
417 efct_lio_sg_unmap(io);
419 efct_lio_io_printf(io, "status=%d, err=%d flags=0x%x, dir=%d\n",
420 scsi_status, ocp->err, flags, ocp->ddir);
422 efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
423 transport_generic_free_cmd(&io->tgt_io.cmd, 0);
428 efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
429 u32 flags, void *arg);
432 efct_lio_write_pending(struct se_cmd *cmd)
434 struct efct_scsi_tgt_io *ocp =
435 container_of(cmd, struct efct_scsi_tgt_io, cmd);
436 struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
437 struct efct_scsi_sgl *sgl = io->sgl;
438 struct scatterlist *sg;
439 u32 flags = 0, cnt, curcnt;
442 efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_WRITE_PENDING);
443 efct_lio_io_printf(io, "trans_state=0x%x se_cmd_flags=0x%x\n",
444 cmd->transport_state, cmd->se_cmd_flags);
446 if (ocp->seg_cnt == 0) {
447 ocp->seg_cnt = cmd->t_data_nents;
449 if (efct_lio_sg_map(io)) {
450 efct_lio_io_printf(io, "efct_lio_sg_map failed\n");
454 curcnt = (ocp->seg_map_cnt - ocp->cur_seg);
455 curcnt = (curcnt < io->sgl_allocated) ? curcnt : io->sgl_allocated;
456 /* find current sg */
457 for (cnt = 0, sg = cmd->t_data_sg; cnt < ocp->cur_seg; cnt++,
461 for (cnt = 0; cnt < curcnt; cnt++, sg = sg_next(sg)) {
462 sgl[cnt].addr = sg_dma_address(sg);
463 sgl[cnt].dif_addr = 0;
464 sgl[cnt].len = sg_dma_len(sg);
465 length += sgl[cnt].len;
469 if (ocp->cur_seg == ocp->seg_cnt)
470 flags = EFCT_SCSI_LAST_DATAPHASE;
472 return efct_scsi_recv_wr_data(io, flags, sgl, curcnt, length,
473 efct_lio_datamove_done, NULL);
477 efct_lio_queue_data_in(struct se_cmd *cmd)
479 struct efct_scsi_tgt_io *ocp =
480 container_of(cmd, struct efct_scsi_tgt_io, cmd);
481 struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
482 struct efct_scsi_sgl *sgl = io->sgl;
483 struct scatterlist *sg = NULL;
484 uint flags = 0, cnt = 0, curcnt = 0;
487 efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN);
489 if (ocp->seg_cnt == 0) {
490 if (cmd->data_length) {
491 ocp->seg_cnt = cmd->t_data_nents;
493 if (efct_lio_sg_map(io)) {
494 efct_lio_io_printf(io,
495 "efct_lio_sg_map failed\n");
499 /* If command length is 0, send the response status */
500 struct efct_scsi_cmd_resp rsp;
502 memset(&rsp, 0, sizeof(rsp));
503 efct_lio_io_printf(io,
504 "cmd : %p length 0, send status\n",
506 return efct_scsi_send_resp(io, 0, &rsp,
507 efct_lio_status_done, NULL);
510 curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated);
512 while (cnt < curcnt) {
513 sg = &cmd->t_data_sg[ocp->cur_seg];
514 sgl[cnt].addr = sg_dma_address(sg);
515 sgl[cnt].dif_addr = 0;
516 if (ocp->transferred_len + sg_dma_len(sg) >= cmd->data_length)
517 sgl[cnt].len = cmd->data_length - ocp->transferred_len;
519 sgl[cnt].len = sg_dma_len(sg);
521 ocp->transferred_len += sgl[cnt].len;
522 length += sgl[cnt].len;
525 if (ocp->transferred_len == cmd->data_length)
529 if (ocp->transferred_len == cmd->data_length) {
530 flags = EFCT_SCSI_LAST_DATAPHASE;
531 ocp->seg_cnt = ocp->cur_seg;
534 /* If there is residual, disable Auto Good Response */
535 if (cmd->residual_count)
536 flags |= EFCT_SCSI_NO_AUTO_RESPONSE;
538 efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RD_DATA);
540 return efct_scsi_send_rd_data(io, flags, sgl, curcnt, length,
541 efct_lio_datamove_done, NULL);
545 efct_lio_send_resp(struct efct_io *io, enum efct_scsi_io_status scsi_status,
548 struct efct_scsi_cmd_resp rsp;
549 struct efct_scsi_tgt_io *ocp = &io->tgt_io;
550 struct se_cmd *cmd = &io->tgt_io.cmd;
553 if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) {
554 ocp->rsp_sent = true;
555 efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
556 transport_generic_free_cmd(&io->tgt_io.cmd, 0);
560 /* send check condition if an error occurred */
561 memset(&rsp, 0, sizeof(rsp));
562 rsp.scsi_status = cmd->scsi_status;
563 rsp.sense_data = (uint8_t *)io->tgt_io.sense_buffer;
564 rsp.sense_data_length = cmd->scsi_sense_length;
566 /* Check for residual underrun or overrun */
567 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
568 rsp.residual = -cmd->residual_count;
569 else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
570 rsp.residual = cmd->residual_count;
572 rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
573 efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
575 efct_lio_io_printf(io, "Read done, send rsp failed %d\n", rc);
576 efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
577 transport_generic_free_cmd(&io->tgt_io.cmd, 0);
579 ocp->rsp_sent = true;
584 efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
585 u32 flags, void *arg)
587 struct efct_scsi_tgt_io *ocp = &io->tgt_io;
589 efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_DATA_DONE);
590 if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
591 efct_lio_io_printf(io, "callback completed with error=%d\n",
593 ocp->err = scsi_status;
595 efct_lio_io_printf(io, "seg_map_cnt=%d\n", ocp->seg_map_cnt);
596 if (ocp->seg_map_cnt) {
597 if (ocp->err == EFCT_SCSI_STATUS_GOOD &&
598 ocp->cur_seg < ocp->seg_cnt) {
601 efct_lio_io_printf(io, "continuing cmd at segm=%d\n",
603 if (ocp->ddir == DMA_TO_DEVICE)
604 rc = efct_lio_write_pending(&ocp->cmd);
606 rc = efct_lio_queue_data_in(&ocp->cmd);
610 ocp->err = EFCT_SCSI_STATUS_ERROR;
611 efct_lio_io_printf(io, "could not continue command\n");
613 efct_lio_sg_unmap(io);
616 if (io->tgt_io.aborting) {
617 efct_lio_io_printf(io, "IO done aborted\n");
621 if (ocp->ddir == DMA_TO_DEVICE) {
622 efct_lio_io_printf(io, "Write done, trans_state=0x%x\n",
623 io->tgt_io.cmd.transport_state);
624 if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
625 transport_generic_request_failure(&io->tgt_io.cmd,
626 TCM_CHECK_CONDITION_ABORT_CMD);
627 efct_set_lio_io_state(io,
628 EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE);
630 efct_set_lio_io_state(io,
631 EFCT_LIO_STATE_TGT_EXECUTE_CMD);
632 target_execute_cmd(&io->tgt_io.cmd);
635 efct_lio_send_resp(io, scsi_status, flags);
641 efct_lio_tmf_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
642 u32 flags, void *arg)
644 efct_lio_tmfio_printf(io, "cmd=%p status=%d, flags=0x%x\n",
645 &io->tgt_io.cmd, scsi_status, flags);
647 efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
648 transport_generic_free_cmd(&io->tgt_io.cmd, 0);
653 efct_lio_null_tmf_done(struct efct_io *tmfio,
654 enum efct_scsi_io_status scsi_status,
655 u32 flags, void *arg)
657 efct_lio_tmfio_printf(tmfio, "cmd=%p status=%d, flags=0x%x\n",
658 &tmfio->tgt_io.cmd, scsi_status, flags);
660 /* free struct efct_io only, no active se_cmd */
661 efct_scsi_io_complete(tmfio);
666 efct_lio_queue_status(struct se_cmd *cmd)
668 struct efct_scsi_cmd_resp rsp;
669 struct efct_scsi_tgt_io *ocp =
670 container_of(cmd, struct efct_scsi_tgt_io, cmd);
671 struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
674 efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_STATUS);
675 efct_lio_io_printf(io,
676 "status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n",
677 cmd->scsi_status, cmd->transport_state, cmd->se_cmd_flags,
678 cmd->scsi_sense_length);
680 memset(&rsp, 0, sizeof(rsp));
681 rsp.scsi_status = cmd->scsi_status;
682 rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
683 rsp.sense_data_length = cmd->scsi_sense_length;
685 /* Check for residual underrun or overrun, mark negitive value for
686 * underrun to recognize in HW
688 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
689 rsp.residual = -cmd->residual_count;
690 else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
691 rsp.residual = cmd->residual_count;
693 rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
694 efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
696 ocp->rsp_sent = true;
700 static void efct_lio_queue_tm_rsp(struct se_cmd *cmd)
702 struct efct_scsi_tgt_io *ocp =
703 container_of(cmd, struct efct_scsi_tgt_io, cmd);
704 struct efct_io *tmfio = container_of(ocp, struct efct_io, tgt_io);
705 struct se_tmr_req *se_tmr = cmd->se_tmr_req;
708 efct_lio_tmfio_printf(tmfio, "cmd=%p function=0x%x tmr->response=%d\n",
709 cmd, se_tmr->function, se_tmr->response);
710 switch (se_tmr->response) {
711 case TMR_FUNCTION_COMPLETE:
712 rspcode = EFCT_SCSI_TMF_FUNCTION_COMPLETE;
714 case TMR_TASK_DOES_NOT_EXIST:
715 rspcode = EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND;
717 case TMR_LUN_DOES_NOT_EXIST:
718 rspcode = EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER;
720 case TMR_FUNCTION_REJECTED:
722 rspcode = EFCT_SCSI_TMF_FUNCTION_REJECTED;
725 efct_scsi_send_tmf_resp(tmfio, rspcode, NULL, efct_lio_tmf_done, NULL);
728 static struct efct *efct_find_wwpn(u64 wwpn)
732 /* Search for the HBA that has this WWPN */
733 list_for_each_entry(efct, &efct_devices, list_entry) {
735 if (wwpn == efct_get_wwpn(&efct->hw))
742 static struct se_wwn *
743 efct_lio_make_nport(struct target_fabric_configfs *tf,
744 struct config_group *group, const char *name)
746 struct efct_lio_nport *lio_nport;
751 ret = efct_lio_parse_wwn(name, &wwpn, 0);
755 efct = efct_find_wwpn(wwpn);
757 pr_err("cannot find EFCT for base wwpn %s\n", name);
758 return ERR_PTR(-ENXIO);
761 lio_nport = kzalloc(sizeof(*lio_nport), GFP_KERNEL);
763 return ERR_PTR(-ENOMEM);
765 lio_nport->efct = efct;
766 lio_nport->wwpn = wwpn;
767 efct_format_wwn(lio_nport->wwpn_str, sizeof(lio_nport->wwpn_str),
769 efct->tgt_efct.lio_nport = lio_nport;
771 return &lio_nport->nport_wwn;
774 static struct se_wwn *
775 efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
776 struct config_group *group, const char *name)
778 struct efct_lio_vport *lio_vport;
781 u64 p_wwpn, npiv_wwpn, npiv_wwnn;
782 char *p, *pbuf, tmp[128];
783 struct efct_lio_vport_list_t *vport_list;
784 struct fc_vport *new_fc_vport;
785 struct fc_vport_identifiers vport_id;
786 unsigned long flags = 0;
788 snprintf(tmp, sizeof(tmp), "%s", name);
791 p = strsep(&pbuf, "@");
794 pr_err("Unable to find separator operator(@)\n");
795 return ERR_PTR(-EINVAL);
798 ret = efct_lio_parse_wwn(p, &p_wwpn, 0);
802 ret = efct_lio_parse_npiv_wwn(pbuf, strlen(pbuf), &npiv_wwpn,
807 efct = efct_find_wwpn(p_wwpn);
809 pr_err("cannot find EFCT for base wwpn %s\n", name);
810 return ERR_PTR(-ENXIO);
813 lio_vport = kzalloc(sizeof(*lio_vport), GFP_KERNEL);
815 return ERR_PTR(-ENOMEM);
817 lio_vport->efct = efct;
818 lio_vport->wwpn = p_wwpn;
819 lio_vport->npiv_wwpn = npiv_wwpn;
820 lio_vport->npiv_wwnn = npiv_wwnn;
822 efct_format_wwn(lio_vport->wwpn_str, sizeof(lio_vport->wwpn_str),
825 vport_list = kzalloc(sizeof(*vport_list), GFP_KERNEL);
828 return ERR_PTR(-ENOMEM);
831 vport_list->lio_vport = lio_vport;
833 memset(&vport_id, 0, sizeof(vport_id));
834 vport_id.port_name = npiv_wwpn;
835 vport_id.node_name = npiv_wwnn;
836 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
837 vport_id.vport_type = FC_PORTTYPE_NPIV;
838 vport_id.disable = false;
840 new_fc_vport = fc_vport_create(efct->shost, 0, &vport_id);
842 efc_log_err(efct, "fc_vport_create failed\n");
845 return ERR_PTR(-ENOMEM);
848 lio_vport->fc_vport = new_fc_vport;
849 spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
850 INIT_LIST_HEAD(&vport_list->list_entry);
851 list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
852 spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
854 return &lio_vport->vport_wwn;
858 efct_lio_drop_nport(struct se_wwn *wwn)
860 struct efct_lio_nport *lio_nport =
861 container_of(wwn, struct efct_lio_nport, nport_wwn);
862 struct efct *efct = lio_nport->efct;
864 /* only physical nport should exist, free lio_nport allocated
865 * in efct_lio_make_nport.
867 kfree(efct->tgt_efct.lio_nport);
868 efct->tgt_efct.lio_nport = NULL;
872 efct_lio_npiv_drop_nport(struct se_wwn *wwn)
874 struct efct_lio_vport *lio_vport =
875 container_of(wwn, struct efct_lio_vport, vport_wwn);
876 struct efct_lio_vport_list_t *vport, *next_vport;
877 struct efct *efct = lio_vport->efct;
878 unsigned long flags = 0;
880 if (lio_vport->fc_vport)
881 fc_vport_terminate(lio_vport->fc_vport);
883 spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
885 list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
887 if (vport->lio_vport == lio_vport) {
888 list_del(&vport->list_entry);
889 kfree(vport->lio_vport);
894 spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
897 static struct se_portal_group *
898 efct_lio_make_tpg(struct se_wwn *wwn, const char *name)
900 struct efct_lio_nport *lio_nport =
901 container_of(wwn, struct efct_lio_nport, nport_wwn);
902 struct efct_lio_tpg *tpg;
907 if (strstr(name, "tpgt_") != name)
908 return ERR_PTR(-EINVAL);
909 if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
910 return ERR_PTR(-EINVAL);
912 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
914 return ERR_PTR(-ENOMEM);
916 tpg->nport = lio_nport;
918 tpg->enabled = false;
920 tpg->tpg_attrib.generate_node_acls = 1;
921 tpg->tpg_attrib.demo_mode_write_protect = 1;
922 tpg->tpg_attrib.cache_dynamic_acls = 1;
923 tpg->tpg_attrib.demo_mode_login_only = 1;
924 tpg->tpg_attrib.session_deletion_wait = 1;
926 ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
931 efct = lio_nport->efct;
932 efct->tgt_efct.tpg = tpg;
933 efc_log_debug(efct, "create portal group %d\n", tpg->tpgt);
935 xa_init(&efct->lookup);
940 efct_lio_drop_tpg(struct se_portal_group *se_tpg)
942 struct efct_lio_tpg *tpg =
943 container_of(se_tpg, struct efct_lio_tpg, tpg);
945 struct efct *efct = tpg->nport->efct;
947 efc_log_debug(efct, "drop portal group %d\n", tpg->tpgt);
948 tpg->nport->efct->tgt_efct.tpg = NULL;
949 core_tpg_deregister(se_tpg);
950 xa_destroy(&efct->lookup);
954 static struct se_portal_group *
955 efct_lio_npiv_make_tpg(struct se_wwn *wwn, const char *name)
957 struct efct_lio_vport *lio_vport =
958 container_of(wwn, struct efct_lio_vport, vport_wwn);
959 struct efct_lio_tpg *tpg;
964 efct = lio_vport->efct;
965 if (strstr(name, "tpgt_") != name)
966 return ERR_PTR(-EINVAL);
967 if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
968 return ERR_PTR(-EINVAL);
971 efc_log_err(efct, "Invalid tpgt index: %ld provided\n", n);
972 return ERR_PTR(-EINVAL);
975 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
977 return ERR_PTR(-ENOMEM);
979 tpg->vport = lio_vport;
981 tpg->enabled = false;
983 tpg->tpg_attrib.generate_node_acls = 1;
984 tpg->tpg_attrib.demo_mode_write_protect = 1;
985 tpg->tpg_attrib.cache_dynamic_acls = 1;
986 tpg->tpg_attrib.demo_mode_login_only = 1;
987 tpg->tpg_attrib.session_deletion_wait = 1;
989 ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
995 lio_vport->tpg = tpg;
996 efc_log_debug(efct, "create vport portal group %d\n", tpg->tpgt);
1002 efct_lio_npiv_drop_tpg(struct se_portal_group *se_tpg)
1004 struct efct_lio_tpg *tpg =
1005 container_of(se_tpg, struct efct_lio_tpg, tpg);
1007 efc_log_debug(tpg->vport->efct, "drop npiv portal group %d\n",
1009 core_tpg_deregister(se_tpg);
1014 efct_lio_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1016 struct efct_lio_nacl *nacl;
1019 if (efct_lio_parse_wwn(name, &wwnn, 0) < 0)
1022 nacl = container_of(se_nacl, struct efct_lio_nacl, se_node_acl);
1023 nacl->nport_wwnn = wwnn;
1025 efct_format_wwn(nacl->nport_name, sizeof(nacl->nport_name), "", wwnn);
1029 static int efct_lio_check_demo_mode_login_only(struct se_portal_group *stpg)
1031 struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
1033 return tpg->tpg_attrib.demo_mode_login_only;
1037 efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group *stpg)
1039 struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
1041 return tpg->tpg_attrib.demo_mode_login_only;
1044 static struct efct_lio_tpg *
1045 efct_get_vport_tpg(struct efc_node *node)
1048 u64 wwpn = node->nport->wwpn;
1049 struct efct_lio_vport_list_t *vport, *next;
1050 struct efct_lio_vport *lio_vport = NULL;
1051 struct efct_lio_tpg *tpg = NULL;
1052 unsigned long flags = 0;
1054 efct = node->efc->base;
1055 spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
1056 list_for_each_entry_safe(vport, next, &efct->tgt_efct.vport_list,
1058 lio_vport = vport->lio_vport;
1059 if (wwpn && lio_vport && lio_vport->npiv_wwpn == wwpn) {
1060 efc_log_debug(efct, "found tpg on vport\n");
1061 tpg = lio_vport->tpg;
1065 spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
1070 _efct_tgt_node_free(struct kref *arg)
1072 struct efct_node *tgt_node = container_of(arg, struct efct_node, ref);
1073 struct efc_node *node = tgt_node->node;
1075 efc_scsi_del_initiator_complete(node->efc, node);
1079 static int efct_session_cb(struct se_portal_group *se_tpg,
1080 struct se_session *se_sess, void *private)
1082 struct efc_node *node = private;
1083 struct efct_node *tgt_node;
1084 struct efct *efct = node->efc->base;
1086 tgt_node = kzalloc(sizeof(*tgt_node), GFP_KERNEL);
1090 kref_init(&tgt_node->ref);
1091 tgt_node->release = _efct_tgt_node_free;
1093 tgt_node->session = se_sess;
1094 node->tgt_node = tgt_node;
1095 tgt_node->efct = efct;
1097 tgt_node->node = node;
1099 tgt_node->node_fc_id = node->rnode.fc_id;
1100 tgt_node->port_fc_id = node->nport->fc_id;
1101 tgt_node->vpi = node->nport->indicator;
1102 tgt_node->rpi = node->rnode.indicator;
1104 spin_lock_init(&tgt_node->active_ios_lock);
1105 INIT_LIST_HEAD(&tgt_node->active_ios);
1110 int efct_scsi_tgt_new_device(struct efct *efct)
1114 /* Get the max settings */
1115 efct->tgt_efct.max_sge = sli_get_max_sge(&efct->hw.sli);
1116 efct->tgt_efct.max_sgl = sli_get_max_sgl(&efct->hw.sli);
1118 /* initialize IO watermark fields */
1119 atomic_set(&efct->tgt_efct.ios_in_use, 0);
1120 total_ios = efct->hw.config.n_io;
1121 efc_log_debug(efct, "total_ios=%d\n", total_ios);
1122 efct->tgt_efct.watermark_min =
1123 (total_ios * EFCT_WATERMARK_LOW_PCT) / 100;
1124 efct->tgt_efct.watermark_max =
1125 (total_ios * EFCT_WATERMARK_HIGH_PCT) / 100;
1126 atomic_set(&efct->tgt_efct.io_high_watermark,
1127 efct->tgt_efct.watermark_max);
1128 atomic_set(&efct->tgt_efct.watermark_hit, 0);
1129 atomic_set(&efct->tgt_efct.initiator_count, 0);
1131 lio_wq = create_singlethread_workqueue("efct_lio_worker");
1133 efc_log_err(efct, "workqueue create failed\n");
1137 spin_lock_init(&efct->tgt_efct.efct_lio_lock);
1138 INIT_LIST_HEAD(&efct->tgt_efct.vport_list);
1143 int efct_scsi_tgt_del_device(struct efct *efct)
1145 flush_workqueue(lio_wq);
1151 efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport)
1153 struct efct *efct = nport->efc->base;
1155 efc_log_debug(efct, "New SPORT: %s bound to %s\n", nport->display_name,
1156 efct->tgt_efct.lio_nport->wwpn_str);
1162 efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport)
1164 efc_log_debug(efc, "Del SPORT: %s\n", nport->display_name);
1167 static void efct_lio_setup_session(struct work_struct *work)
1169 struct efct_lio_wq_data *wq_data =
1170 container_of(work, struct efct_lio_wq_data, work);
1171 struct efct *efct = wq_data->efct;
1172 struct efc_node *node = wq_data->ptr;
1173 char wwpn[WWN_NAME_LEN];
1174 struct efct_lio_tpg *tpg;
1175 struct efct_node *tgt_node;
1176 struct se_portal_group *se_tpg;
1177 struct se_session *se_sess;
1182 /* Check to see if it's belongs to vport,
1183 * if not get physical port
1185 tpg = efct_get_vport_tpg(node);
1188 } else if (efct->tgt_efct.tpg) {
1189 tpg = efct->tgt_efct.tpg;
1192 efc_log_err(efct, "failed to init session\n");
1197 * Format the FCP Initiator port_name into colon
1198 * separated values to match the format by our explicit
1199 * ConfigFS NodeACLs.
1201 efct_format_wwn(wwpn, sizeof(wwpn), "", efc_node_get_wwpn(node));
1203 se_sess = target_setup_session(se_tpg, 0, 0, TARGET_PROT_NORMAL, wwpn,
1204 node, efct_session_cb);
1205 if (IS_ERR(se_sess)) {
1206 efc_log_err(efct, "failed to setup session\n");
1208 efc_scsi_sess_reg_complete(node, -EIO);
1212 tgt_node = node->tgt_node;
1213 id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
1215 efc_log_debug(efct, "new initiator sess=%p node=%p id: %llx\n",
1218 if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL)))
1219 efc_log_err(efct, "Node lookup store failed\n");
1221 efc_scsi_sess_reg_complete(node, 0);
1223 /* update IO watermark: increment initiator count */
1224 ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count);
1225 watermark = efct->tgt_efct.watermark_max -
1226 ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
1227 watermark = (efct->tgt_efct.watermark_min > watermark) ?
1228 efct->tgt_efct.watermark_min : watermark;
1229 atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
1234 int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node)
1236 struct efct *efct = node->efc->base;
1237 struct efct_lio_wq_data *wq_data;
1240 * Since LIO only supports initiator validation at thread level,
1241 * we are open minded and accept all callers.
1243 wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
1247 wq_data->ptr = node;
1248 wq_data->efct = efct;
1249 INIT_WORK(&wq_data->work, efct_lio_setup_session);
1250 queue_work(lio_wq, &wq_data->work);
1251 return EFC_SCSI_CALL_ASYNC;
1254 static void efct_lio_remove_session(struct work_struct *work)
1256 struct efct_lio_wq_data *wq_data =
1257 container_of(work, struct efct_lio_wq_data, work);
1258 struct efct *efct = wq_data->efct;
1259 struct efc_node *node = wq_data->ptr;
1260 struct efct_node *tgt_node;
1261 struct se_session *se_sess;
1263 tgt_node = node->tgt_node;
1265 /* base driver has sent back-to-back requests
1266 * to unreg session with no intervening
1269 efc_log_err(efct, "unreg session for NULL session\n");
1270 efc_scsi_del_initiator_complete(node->efc, node);
1274 se_sess = tgt_node->session;
1275 efc_log_debug(efct, "unreg session se_sess=%p node=%p\n",
1278 /* first flag all session commands to complete */
1279 target_stop_session(se_sess);
1281 /* now wait for session commands to complete */
1282 target_wait_for_sess_cmds(se_sess);
1283 target_remove_session(se_sess);
1284 tgt_node->session = NULL;
1285 node->tgt_node = NULL;
1286 kref_put(&tgt_node->ref, tgt_node->release);
1291 int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason)
1293 struct efct *efct = node->efc->base;
1294 struct efct_node *tgt_node = node->tgt_node;
1295 struct efct_lio_wq_data *wq_data;
1300 if (reason == EFCT_SCSI_INITIATOR_MISSING)
1301 return EFC_SCSI_CALL_COMPLETE;
1304 efc_log_err(efct, "tgt_node is NULL\n");
1308 wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
1312 id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
1313 xa_erase(&efct->lookup, id);
1315 wq_data->ptr = node;
1316 wq_data->efct = efct;
1317 INIT_WORK(&wq_data->work, efct_lio_remove_session);
1318 queue_work(lio_wq, &wq_data->work);
1321 * update IO watermark: decrement initiator count
1323 ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count);
1325 watermark = efct->tgt_efct.watermark_max -
1326 ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
1327 watermark = (efct->tgt_efct.watermark_min > watermark) ?
1328 efct->tgt_efct.watermark_min : watermark;
1329 atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
1331 return EFC_SCSI_CALL_ASYNC;
1334 void efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb,
1335 u32 cdb_len, u32 flags)
1337 struct efct_scsi_tgt_io *ocp = &io->tgt_io;
1338 struct se_cmd *se_cmd = &io->tgt_io.cmd;
1339 struct efct *efct = io->efct;
1341 struct efct_node *tgt_node;
1342 struct se_session *se_sess;
1345 memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
1346 efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RECV_CMD);
1347 atomic_add_return(1, &efct->tgt_efct.ios_in_use);
1349 /* set target timeout */
1350 io->timeout = efct->target_io_timer_sec;
1352 if (flags & EFCT_SCSI_CMD_SIMPLE)
1353 ocp->task_attr = TCM_SIMPLE_TAG;
1354 else if (flags & EFCT_SCSI_CMD_HEAD_OF_QUEUE)
1355 ocp->task_attr = TCM_HEAD_TAG;
1356 else if (flags & EFCT_SCSI_CMD_ORDERED)
1357 ocp->task_attr = TCM_ORDERED_TAG;
1358 else if (flags & EFCT_SCSI_CMD_ACA)
1359 ocp->task_attr = TCM_ACA_TAG;
1361 switch (flags & (EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT)) {
1362 case EFCT_SCSI_CMD_DIR_IN:
1363 ddir = "FROM_INITIATOR";
1364 ocp->ddir = DMA_TO_DEVICE;
1366 case EFCT_SCSI_CMD_DIR_OUT:
1367 ddir = "TO_INITIATOR";
1368 ocp->ddir = DMA_FROM_DEVICE;
1370 case EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT:
1372 ocp->ddir = DMA_BIDIRECTIONAL;
1376 ocp->ddir = DMA_NONE;
1381 efct_lio_io_printf(io, "new cmd=0x%x ddir=%s dl=%u\n",
1382 cdb[0], ddir, io->exp_xfer_len);
1384 tgt_node = io->node;
1385 se_sess = tgt_node->session;
1387 efc_log_err(efct, "No session found to submit IO se_cmd: %p\n",
1389 efct_scsi_io_free(io);
1393 efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_SUBMIT_CMD);
1394 rc = target_init_cmd(se_cmd, se_sess, &io->tgt_io.sense_buffer[0],
1395 ocp->lun, io->exp_xfer_len, ocp->task_attr,
1396 ocp->ddir, TARGET_SCF_ACK_KREF);
1398 efc_log_err(efct, "failed to init cmd se_cmd: %p\n", se_cmd);
1399 efct_scsi_io_free(io);
1403 if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0,
1404 NULL, 0, GFP_ATOMIC))
1407 target_submit(se_cmd);
1411 efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
1412 struct efct_io *io_to_abort, u32 flags)
1414 unsigned char tmr_func;
1415 struct efct *efct = tmfio->efct;
1416 struct efct_scsi_tgt_io *ocp = &tmfio->tgt_io;
1417 struct efct_node *tgt_node;
1418 struct se_session *se_sess;
1421 memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
1422 efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_SCSI_RECV_TMF);
1423 atomic_add_return(1, &efct->tgt_efct.ios_in_use);
1424 efct_lio_tmfio_printf(tmfio, "%s: new tmf %x lun=%u\n",
1425 tmfio->display_name, cmd, lun);
1428 case EFCT_SCSI_TMF_ABORT_TASK:
1429 tmr_func = TMR_ABORT_TASK;
1431 case EFCT_SCSI_TMF_ABORT_TASK_SET:
1432 tmr_func = TMR_ABORT_TASK_SET;
1434 case EFCT_SCSI_TMF_CLEAR_TASK_SET:
1435 tmr_func = TMR_CLEAR_TASK_SET;
1437 case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET:
1438 tmr_func = TMR_LUN_RESET;
1440 case EFCT_SCSI_TMF_CLEAR_ACA:
1441 tmr_func = TMR_CLEAR_ACA;
1443 case EFCT_SCSI_TMF_TARGET_RESET:
1444 tmr_func = TMR_TARGET_WARM_RESET;
1446 case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
1447 case EFCT_SCSI_TMF_QUERY_TASK_SET:
1452 tmfio->tgt_io.tmf = tmr_func;
1453 tmfio->tgt_io.lun = lun;
1454 tmfio->tgt_io.io_to_abort = io_to_abort;
1456 tgt_node = tmfio->node;
1458 se_sess = tgt_node->session;
1462 rc = target_submit_tmr(&ocp->cmd, se_sess, NULL, lun, ocp, tmr_func,
1463 GFP_ATOMIC, tmfio->init_task_tag, TARGET_SCF_ACK_KREF);
1465 efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_TGT_SUBMIT_TMR);
1472 efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_REJECTED,
1473 NULL, efct_lio_null_tmf_done, NULL);
/* Start items for efct_lio_tpg_attrib_cit */

/*
 * DEF_EFCT_TPG_ATTRIB() - generate configfs show/store handlers for one
 * boolean attribute of struct efct_lio_tpg_attrib.  The store handler only
 * accepts 0 or 1 and rejects anything else with -EINVAL.
 */
#define DEF_EFCT_TPG_ATTRIB(name)					  \
									  \
static ssize_t efct_lio_tpg_attrib_##name##_show(			  \
		struct config_item *item, char *page)			  \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
									  \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		  \
}									  \
									  \
static ssize_t efct_lio_tpg_attrib_##name##_store(			  \
		struct config_item *item, const char *page, size_t count) \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
					struct efct_lio_tpg, tpg);	  \
	struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib;		  \
	unsigned long val;						  \
	int ret;							  \
									  \
	ret = kstrtoul(page, 0, &val);					  \
	if (ret < 0) {							  \
		pr_err("kstrtoul() failed with ret: %d\n", ret);	  \
		return ret;						  \
	}								  \
									  \
	if (val != 0 && val != 1) {					  \
		pr_err("Illegal boolean value %lu\n", val);		  \
		return -EINVAL;						  \
	}								  \
									  \
	a->name = val;							  \
									  \
	return count;							  \
}									  \
CONFIGFS_ATTR(efct_lio_tpg_attrib_, name)
1518 DEF_EFCT_TPG_ATTRIB(generate_node_acls);
1519 DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls);
1520 DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect);
1521 DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect);
1522 DEF_EFCT_TPG_ATTRIB(demo_mode_login_only);
1523 DEF_EFCT_TPG_ATTRIB(session_deletion_wait);
1525 static struct configfs_attribute *efct_lio_tpg_attrib_attrs[] = {
1526 &efct_lio_tpg_attrib_attr_generate_node_acls,
1527 &efct_lio_tpg_attrib_attr_cache_dynamic_acls,
1528 &efct_lio_tpg_attrib_attr_demo_mode_write_protect,
1529 &efct_lio_tpg_attrib_attr_prod_mode_write_protect,
1530 &efct_lio_tpg_attrib_attr_demo_mode_login_only,
1531 &efct_lio_tpg_attrib_attr_session_deletion_wait,
/*
 * DEF_EFCT_NPIV_TPG_ATTRIB() - NPIV variant of DEF_EFCT_TPG_ATTRIB();
 * generates configfs show/store handlers for one boolean attribute of the
 * NPIV portal group.  Store accepts only 0 or 1.
 */
#define DEF_EFCT_NPIV_TPG_ATTRIB(name)					  \
									  \
static ssize_t efct_lio_npiv_tpg_attrib_##name##_show(			  \
		struct config_item *item, char *page)			  \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
									  \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		  \
}									  \
									  \
static ssize_t efct_lio_npiv_tpg_attrib_##name##_store(			  \
		struct config_item *item, const char *page, size_t count) \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
	struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib;		  \
	unsigned long val;						  \
	int ret;							  \
									  \
	ret = kstrtoul(page, 0, &val);					  \
	if (ret < 0) {							  \
		pr_err("kstrtoul() failed with ret: %d\n", ret);	  \
		return ret;						  \
	}								  \
									  \
	if (val != 0 && val != 1) {					  \
		pr_err("Illegal boolean value %lu\n", val);		  \
		return -EINVAL;						  \
	}								  \
									  \
	a->name = val;							  \
									  \
	return count;							  \
}									  \
CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name)
1574 DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls);
1575 DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls);
1576 DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect);
1577 DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect);
1578 DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only);
1579 DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait);
1581 static struct configfs_attribute *efct_lio_npiv_tpg_attrib_attrs[] = {
1582 &efct_lio_npiv_tpg_attrib_attr_generate_node_acls,
1583 &efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls,
1584 &efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect,
1585 &efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect,
1586 &efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only,
1587 &efct_lio_npiv_tpg_attrib_attr_session_deletion_wait,
/* Base-TPG and NPIV-TPG "enable" attributes; each table is NULL-terminated */
CONFIGFS_ATTR(efct_lio_tpg_, enable);
static struct configfs_attribute *efct_lio_tpg_attrs[] = {
	&efct_lio_tpg_attr_enable, NULL };
CONFIGFS_ATTR(efct_lio_npiv_tpg_, enable);
static struct configfs_attribute *efct_lio_npiv_tpg_attrs[] = {
	&efct_lio_npiv_tpg_attr_enable, NULL };
1598 static const struct target_core_fabric_ops efct_lio_ops = {
1599 .module = THIS_MODULE,
1600 .fabric_name = "efct",
1601 .node_acl_size = sizeof(struct efct_lio_nacl),
1602 .max_data_sg_nents = 65535,
1603 .tpg_get_wwn = efct_lio_get_fabric_wwn,
1604 .tpg_get_tag = efct_lio_get_tag,
1605 .fabric_init_nodeacl = efct_lio_init_nodeacl,
1606 .tpg_check_demo_mode = efct_lio_check_demo_mode,
1607 .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
1608 .tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect,
1609 .tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect,
1610 .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
1611 .check_stop_free = efct_lio_check_stop_free,
1612 .aborted_task = efct_lio_aborted_task,
1613 .release_cmd = efct_lio_release_cmd,
1614 .close_session = efct_lio_close_session,
1615 .sess_get_index = efct_lio_sess_get_index,
1616 .write_pending = efct_lio_write_pending,
1617 .set_default_node_attributes = efct_lio_set_default_node_attrs,
1618 .get_cmd_state = efct_lio_get_cmd_state,
1619 .queue_data_in = efct_lio_queue_data_in,
1620 .queue_status = efct_lio_queue_status,
1621 .queue_tm_rsp = efct_lio_queue_tm_rsp,
1622 .fabric_make_wwn = efct_lio_make_nport,
1623 .fabric_drop_wwn = efct_lio_drop_nport,
1624 .fabric_make_tpg = efct_lio_make_tpg,
1625 .fabric_drop_tpg = efct_lio_drop_tpg,
1626 .tpg_check_demo_mode_login_only = efct_lio_check_demo_mode_login_only,
1627 .tpg_check_prot_fabric_only = NULL,
1628 .sess_get_initiator_sid = NULL,
1629 .tfc_tpg_base_attrs = efct_lio_tpg_attrs,
1630 .tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs,
1633 static const struct target_core_fabric_ops efct_lio_npiv_ops = {
1634 .module = THIS_MODULE,
1635 .fabric_name = "efct_npiv",
1636 .node_acl_size = sizeof(struct efct_lio_nacl),
1637 .max_data_sg_nents = 65535,
1638 .tpg_get_wwn = efct_lio_get_npiv_fabric_wwn,
1639 .tpg_get_tag = efct_lio_get_npiv_tag,
1640 .fabric_init_nodeacl = efct_lio_init_nodeacl,
1641 .tpg_check_demo_mode = efct_lio_check_demo_mode,
1642 .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
1643 .tpg_check_demo_mode_write_protect =
1644 efct_lio_npiv_check_demo_write_protect,
1645 .tpg_check_prod_mode_write_protect =
1646 efct_lio_npiv_check_prod_write_protect,
1647 .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
1648 .check_stop_free = efct_lio_check_stop_free,
1649 .aborted_task = efct_lio_aborted_task,
1650 .release_cmd = efct_lio_release_cmd,
1651 .close_session = efct_lio_close_session,
1652 .sess_get_index = efct_lio_sess_get_index,
1653 .write_pending = efct_lio_write_pending,
1654 .set_default_node_attributes = efct_lio_set_default_node_attrs,
1655 .get_cmd_state = efct_lio_get_cmd_state,
1656 .queue_data_in = efct_lio_queue_data_in,
1657 .queue_status = efct_lio_queue_status,
1658 .queue_tm_rsp = efct_lio_queue_tm_rsp,
1659 .fabric_make_wwn = efct_lio_npiv_make_nport,
1660 .fabric_drop_wwn = efct_lio_npiv_drop_nport,
1661 .fabric_make_tpg = efct_lio_npiv_make_tpg,
1662 .fabric_drop_tpg = efct_lio_npiv_drop_tpg,
1663 .tpg_check_demo_mode_login_only =
1664 efct_lio_npiv_check_demo_mode_login_only,
1665 .tpg_check_prot_fabric_only = NULL,
1666 .sess_get_initiator_sid = NULL,
1667 .tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs,
1668 .tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs,
1671 int efct_scsi_tgt_driver_init(void)
1675 /* Register the top level struct config_item_type with TCM core */
1676 rc = target_register_template(&efct_lio_ops);
1678 pr_err("target_fabric_configfs_register failed with %d\n", rc);
1681 rc = target_register_template(&efct_lio_npiv_ops);
1683 pr_err("target_fabric_configfs_register failed with %d\n", rc);
1684 target_unregister_template(&efct_lio_ops);
1690 int efct_scsi_tgt_driver_exit(void)
1692 target_unregister_template(&efct_lio_ops);
1693 target_unregister_template(&efct_lio_npiv_ops);