// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */
10 efc_remote_node_cb(void *arg, int event, void *data)
12 struct efc *efc = arg;
13 struct efc_remote_node *rnode = data;
14 struct efc_node *node = rnode->node;
15 unsigned long flags = 0;
17 spin_lock_irqsave(&efc->lock, flags);
18 efc_node_post_event(node, event, NULL);
19 spin_unlock_irqrestore(&efc->lock, flags);
25 efc_node_find(struct efc_nport *nport, u32 port_id)
27 /* Find an FC node structure given the FC port ID */
28 return xa_load(&nport->lookup, port_id);
32 _efc_node_free(struct kref *arg)
34 struct efc_node *node = container_of(arg, struct efc_node, ref);
35 struct efc *efc = node->efc;
38 dma = &node->sparm_dma_buf;
39 dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
40 memset(dma, 0, sizeof(struct efc_dma));
41 mempool_free(node, efc->node_pool);
44 struct efc_node *efc_node_alloc(struct efc_nport *nport,
45 u32 port_id, bool init, bool targ)
48 struct efc_node *node = NULL;
49 struct efc *efc = nport->efc;
52 if (nport->shutting_down) {
53 efc_log_debug(efc, "node allocation when shutting down %06x",
58 node = mempool_alloc(efc->node_pool, GFP_ATOMIC);
60 efc_log_err(efc, "node allocation failed %06x", port_id);
63 memset(node, 0, sizeof(*node));
65 dma = &node->sparm_dma_buf;
66 dma->size = NODE_SPARAMS_SIZE;
67 dma->virt = dma_pool_zalloc(efc->node_dma_pool, GFP_ATOMIC, &dma->phys);
69 efc_log_err(efc, "node dma alloc failed\n");
72 node->rnode.indicator = U32_MAX;
79 spin_lock_init(&node->pend_frames_lock);
80 INIT_LIST_HEAD(&node->pend_frames);
81 spin_lock_init(&node->els_ios_lock);
82 INIT_LIST_HEAD(&node->els_ios_list);
83 node->els_io_enabled = true;
85 rc = efc_cmd_node_alloc(efc, &node->rnode, port_id, nport);
87 efc_log_err(efc, "efc_hw_node_alloc failed: %d\n", rc);
91 node->rnode.node = node;
95 efc_node_update_display_name(node);
97 rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC));
99 efc_log_err(efc, "Node lookup store failed: %d\n", rc);
103 /* initialize refcount */
104 kref_init(&node->ref);
105 node->release = _efc_node_free;
106 kref_get(&nport->ref);
111 efc_node_free_resources(efc, &node->rnode);
113 dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
115 mempool_free(node, efc->node_pool);
120 efc_node_free(struct efc_node *node)
122 struct efc_nport *nport;
125 struct efc_node *ns = NULL;
130 node_printf(node, "Free'd\n");
134 * Save the name server node. We will send fake RSCN event at
135 * the end to handle ignored RSCN event during node deletion
137 ns = efc_node_find(node->nport, FC_FID_DIR_SERV);
141 efc_log_err(efc, "Node already Freed\n");
145 /* Free HW resources */
146 rc = efc_node_free_resources(efc, &node->rnode);
148 efc_log_err(efc, "efc_hw_node_free failed: %d\n", rc);
150 /* if the gidpt_delay_timer is still running, then delete it */
151 if (timer_pending(&node->gidpt_delay_timer))
152 del_timer(&node->gidpt_delay_timer);
154 xa_erase(&nport->lookup, node->rnode.fc_id);
157 * If the node_list is empty,
158 * then post a ALL_CHILD_NODES_FREE event to the nport,
159 * after the lock is released.
160 * The nport may be free'd as a result of the event.
162 if (xa_empty(&nport->lookup))
163 efc_sm_post_event(&nport->sm, EFC_EVT_ALL_CHILD_NODES_FREE,
167 node->sm.current_state = NULL;
169 kref_put(&nport->ref, nport->release);
170 kref_put(&node->ref, node->release);
173 /* sending fake RSCN event to name server node */
174 efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, NULL);
179 efc_dma_copy_in(struct efc_dma *dma, void *buffer, u32 buffer_length)
181 if (!dma || !buffer || !buffer_length)
184 if (buffer_length > dma->size)
185 buffer_length = dma->size;
187 memcpy(dma->virt, buffer, buffer_length);
188 dma->len = buffer_length;
192 efc_node_attach(struct efc_node *node)
195 struct efc_nport *nport = node->nport;
196 struct efc_domain *domain = nport->domain;
197 struct efc *efc = node->efc;
199 if (!domain->attached) {
200 efc_log_err(efc, "Warning: unattached domain\n");
203 /* Update node->wwpn/wwnn */
205 efc_node_build_eui_name(node->wwpn, sizeof(node->wwpn),
206 efc_node_get_wwpn(node));
207 efc_node_build_eui_name(node->wwnn, sizeof(node->wwnn),
208 efc_node_get_wwnn(node));
210 efc_dma_copy_in(&node->sparm_dma_buf, node->service_params + 4,
211 sizeof(node->service_params) - 4);
213 /* take lock to protect node->rnode.attached */
214 rc = efc_cmd_node_attach(efc, &node->rnode, &node->sparm_dma_buf);
216 efc_log_debug(efc, "efc_hw_node_attach failed: %d\n", rc);
222 efc_node_fcid_display(u32 fc_id, char *buffer, u32 buffer_length)
226 snprintf(buffer, buffer_length, "fabric");
229 snprintf(buffer, buffer_length, "fabctl");
231 case FC_FID_DIR_SERV:
232 snprintf(buffer, buffer_length, "nserve");
235 if (fc_id == FC_FID_DOM_MGR) {
236 snprintf(buffer, buffer_length, "dctl%02x",
239 snprintf(buffer, buffer_length, "%06x", fc_id);
246 efc_node_update_display_name(struct efc_node *node)
248 u32 port_id = node->rnode.fc_id;
249 struct efc_nport *nport = node->nport;
250 char portid_display[16];
252 efc_node_fcid_display(port_id, portid_display, sizeof(portid_display));
254 snprintf(node->display_name, sizeof(node->display_name), "%s.%s",
255 nport->display_name, portid_display);
259 efc_node_send_ls_io_cleanup(struct efc_node *node)
261 if (node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE) {
262 efc_log_debug(node->efc, "[%s] cleaning up LS_ACC oxid=0x%x\n",
263 node->display_name, node->ls_acc_oxid);
265 node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
266 node->ls_acc_io = NULL;
270 static void efc_node_handle_implicit_logo(struct efc_node *node)
275 * currently, only case for implicit logo is PLOGI
276 * recvd. Thus, node's ELS IO pending list won't be
277 * empty (PLOGI will be on it)
279 WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
280 node_printf(node, "Reason: implicit logout, re-authenticate\n");
282 /* Re-attach node with the same HW node resources */
283 node->req_free = false;
284 rc = efc_node_attach(node);
285 efc_node_transition(node, __efc_d_wait_node_attach, NULL);
286 node->els_io_enabled = true;
289 efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
292 static void efc_node_handle_explicit_logo(struct efc_node *node)
294 s8 pend_frames_empty;
295 unsigned long flags = 0;
297 /* cleanup any pending LS_ACC ELSs */
298 efc_node_send_ls_io_cleanup(node);
300 spin_lock_irqsave(&node->pend_frames_lock, flags);
301 pend_frames_empty = list_empty(&node->pend_frames);
302 spin_unlock_irqrestore(&node->pend_frames_lock, flags);
305 * there are two scenarios where we want to keep
307 * 1. there are pending frames that need to be
309 * 2. we're an initiator and the remote node is
310 * a target and we need to re-authenticate
312 node_printf(node, "Shutdown: explicit logo pend=%d ", !pend_frames_empty);
313 node_printf(node, "nport.ini=%d node.tgt=%d\n",
314 node->nport->enable_ini, node->targ);
315 if (!pend_frames_empty || (node->nport->enable_ini && node->targ)) {
316 u8 send_plogi = false;
318 if (node->nport->enable_ini && node->targ) {
320 * we're an initiator and
321 * node shutting down is a target;
322 * we'll need to re-authenticate in
329 * transition to __efc_d_init
330 * (will retain HW node resources)
332 node->els_io_enabled = true;
333 node->req_free = false;
336 * either pending frames exist or we are re-authenticating
337 * with PLOGI (or both); in either case, return to initial
340 efc_node_init_device(node, send_plogi);
342 /* else: let node shutdown occur */
346 efc_node_purge_pending(struct efc_node *node)
348 struct efc *efc = node->efc;
349 struct efc_hw_sequence *frame, *next;
350 unsigned long flags = 0;
352 spin_lock_irqsave(&node->pend_frames_lock, flags);
354 list_for_each_entry_safe(frame, next, &node->pend_frames, list_entry) {
355 list_del(&frame->list_entry);
356 efc->tt.hw_seq_free(efc, frame);
359 spin_unlock_irqrestore(&node->pend_frames_lock, flags);
363 __efc_node_shutdown(struct efc_sm_ctx *ctx,
364 enum efc_sm_event evt, void *arg)
366 struct efc_node *node = ctx->app;
368 efc_node_evt_set(ctx, evt, __func__);
373 case EFC_EVT_ENTER: {
374 efc_node_hold_frames(node);
375 WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
376 /* by default, we will be freeing node after we unwind */
377 node->req_free = true;
379 switch (node->shutdown_reason) {
380 case EFC_NODE_SHUTDOWN_IMPLICIT_LOGO:
381 /* Node shutdown b/c of PLOGI received when node
382 * already logged in. We have PLOGI service
383 * parameters, so submit node attach; we won't be
387 efc_node_handle_implicit_logo(node);
390 case EFC_NODE_SHUTDOWN_EXPLICIT_LOGO:
391 efc_node_handle_explicit_logo(node);
394 case EFC_NODE_SHUTDOWN_DEFAULT:
397 * shutdown due to link down,
398 * node going away (xport event) or
399 * nport shutdown, purge pending and
400 * proceed to cleanup node
403 /* cleanup any pending LS_ACC ELSs */
404 efc_node_send_ls_io_cleanup(node);
407 "Shutdown reason: default, purge pending\n");
408 efc_node_purge_pending(node);
416 efc_node_accept_frames(node);
420 __efc_node_common(__func__, ctx, evt, arg);
425 efc_node_check_els_quiesced(struct efc_node *node)
427 /* check to see if ELS requests, completions are quiesced */
428 if (node->els_req_cnt == 0 && node->els_cmpl_cnt == 0 &&
429 efc_els_io_list_empty(node, &node->els_ios_list)) {
430 if (!node->attached) {
431 /* hw node detach already completed, proceed */
432 node_printf(node, "HW node not attached\n");
433 efc_node_transition(node,
434 __efc_node_wait_ios_shutdown,
438 * hw node detach hasn't completed,
439 * transition and wait
441 node_printf(node, "HW node still attached\n");
442 efc_node_transition(node, __efc_node_wait_node_free,
451 efc_node_initiate_cleanup(struct efc_node *node)
454 * if ELS's have already been quiesced, will move to next state
455 * if ELS's have not been quiesced, abort them
457 if (!efc_node_check_els_quiesced(node)) {
458 efc_node_hold_frames(node);
459 efc_node_transition(node, __efc_node_wait_els_shutdown, NULL);
464 __efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
465 enum efc_sm_event evt, void *arg)
467 bool check_quiesce = false;
468 struct efc_node *node = ctx->app;
470 efc_node_evt_set(ctx, evt, __func__);
473 /* Node state machine: Wait for all ELSs to complete */
476 efc_node_hold_frames(node);
477 if (efc_els_io_list_empty(node, &node->els_ios_list)) {
478 node_printf(node, "All ELS IOs complete\n");
479 check_quiesce = true;
483 efc_node_accept_frames(node);
486 case EFC_EVT_SRRS_ELS_REQ_OK:
487 case EFC_EVT_SRRS_ELS_REQ_FAIL:
488 case EFC_EVT_SRRS_ELS_REQ_RJT:
489 case EFC_EVT_ELS_REQ_ABORTED:
490 if (WARN_ON(!node->els_req_cnt))
493 check_quiesce = true;
496 case EFC_EVT_SRRS_ELS_CMPL_OK:
497 case EFC_EVT_SRRS_ELS_CMPL_FAIL:
498 if (WARN_ON(!node->els_cmpl_cnt))
500 node->els_cmpl_cnt--;
501 check_quiesce = true;
504 case EFC_EVT_ALL_CHILD_NODES_FREE:
505 /* all ELS IO's complete */
506 node_printf(node, "All ELS IOs complete\n");
507 WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
508 check_quiesce = true;
511 case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
512 check_quiesce = true;
515 case EFC_EVT_DOMAIN_ATTACH_OK:
516 /* don't care about domain_attach_ok */
519 /* ignore shutdown events as we're already in shutdown path */
520 case EFC_EVT_SHUTDOWN:
521 /* have default shutdown event take precedence */
522 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
525 case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
526 case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
527 node_printf(node, "%s received\n", efc_sm_event_name(evt));
531 __efc_node_common(__func__, ctx, evt, arg);
535 efc_node_check_els_quiesced(node);
539 __efc_node_wait_node_free(struct efc_sm_ctx *ctx,
540 enum efc_sm_event evt, void *arg)
542 struct efc_node *node = ctx->app;
544 efc_node_evt_set(ctx, evt, __func__);
550 efc_node_hold_frames(node);
554 efc_node_accept_frames(node);
557 case EFC_EVT_NODE_FREE_OK:
558 /* node is officially no longer attached */
559 node->attached = false;
560 efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL);
563 case EFC_EVT_ALL_CHILD_NODES_FREE:
564 case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
565 /* As IOs and ELS IO's complete we expect to get these events */
568 case EFC_EVT_DOMAIN_ATTACH_OK:
569 /* don't care about domain_attach_ok */
572 /* ignore shutdown events as we're already in shutdown path */
573 case EFC_EVT_SHUTDOWN:
574 /* have default shutdown event take precedence */
575 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
578 case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
579 case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
580 node_printf(node, "%s received\n", efc_sm_event_name(evt));
583 __efc_node_common(__func__, ctx, evt, arg);
588 __efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
589 enum efc_sm_event evt, void *arg)
591 struct efc_node *node = ctx->app;
592 struct efc *efc = node->efc;
594 efc_node_evt_set(ctx, evt, __func__);
600 efc_node_hold_frames(node);
602 /* first check to see if no ELS IOs are outstanding */
603 if (efc_els_io_list_empty(node, &node->els_ios_list))
604 /* If there are any active IOS, Free them. */
605 efc_node_transition(node, __efc_node_shutdown, NULL);
608 case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
609 case EFC_EVT_ALL_CHILD_NODES_FREE:
610 if (efc_els_io_list_empty(node, &node->els_ios_list))
611 efc_node_transition(node, __efc_node_shutdown, NULL);
615 efc_node_accept_frames(node);
618 case EFC_EVT_SRRS_ELS_REQ_FAIL:
619 /* Can happen as ELS IO IO's complete */
620 if (WARN_ON(!node->els_req_cnt))
625 /* ignore shutdown events as we're already in shutdown path */
626 case EFC_EVT_SHUTDOWN:
627 /* have default shutdown event take precedence */
628 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
631 case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
632 case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
633 efc_log_debug(efc, "[%s] %-20s\n", node->display_name,
634 efc_sm_event_name(evt));
636 case EFC_EVT_DOMAIN_ATTACH_OK:
637 /* don't care about domain_attach_ok */
640 __efc_node_common(__func__, ctx, evt, arg);
645 __efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
646 enum efc_sm_event evt, void *arg)
648 struct efc_node *node = NULL;
649 struct efc *efc = NULL;
650 struct efc_node_cb *cbdata = arg;
657 case EFC_EVT_REENTER:
659 case EFC_EVT_NPORT_TOPOLOGY_NOTIFY:
660 case EFC_EVT_NODE_MISSING:
661 case EFC_EVT_FCP_CMD_RCVD:
664 case EFC_EVT_NODE_REFOUND:
665 node->refound = true;
669 * node->attached must be set appropriately
670 * for all node attach/detach events
672 case EFC_EVT_NODE_ATTACH_OK:
673 node->attached = true;
676 case EFC_EVT_NODE_FREE_OK:
677 case EFC_EVT_NODE_ATTACH_FAIL:
678 node->attached = false;
682 * handle any ELS completions that
683 * other states either didn't care about
686 case EFC_EVT_SRRS_ELS_CMPL_OK:
687 case EFC_EVT_SRRS_ELS_CMPL_FAIL:
688 if (WARN_ON(!node->els_cmpl_cnt))
690 node->els_cmpl_cnt--;
694 * handle any ELS request completions that
695 * other states either didn't care about
698 case EFC_EVT_SRRS_ELS_REQ_OK:
699 case EFC_EVT_SRRS_ELS_REQ_FAIL:
700 case EFC_EVT_SRRS_ELS_REQ_RJT:
701 case EFC_EVT_ELS_REQ_ABORTED:
702 if (WARN_ON(!node->els_req_cnt))
707 case EFC_EVT_ELS_RCVD: {
708 struct fc_frame_header *hdr = cbdata->header->dma.virt;
711 * Unsupported ELS was received,
712 * send LS_RJT, command not supported
715 "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
716 node->display_name, funcname,
717 ((u8 *)cbdata->payload->dma.virt)[0]);
719 efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
720 ELS_RJT_UNSUP, ELS_EXPL_NONE, 0);
724 case EFC_EVT_PLOGI_RCVD:
725 case EFC_EVT_FLOGI_RCVD:
726 case EFC_EVT_LOGO_RCVD:
727 case EFC_EVT_PRLI_RCVD:
728 case EFC_EVT_PRLO_RCVD:
729 case EFC_EVT_PDISC_RCVD:
730 case EFC_EVT_FDISC_RCVD:
731 case EFC_EVT_ADISC_RCVD:
732 case EFC_EVT_RSCN_RCVD:
733 case EFC_EVT_SCR_RCVD: {
734 struct fc_frame_header *hdr = cbdata->header->dma.virt;
736 /* sm: / send ELS_RJT */
737 efc_log_debug(efc, "[%s] (%s) %s sending ELS_RJT\n",
738 node->display_name, funcname,
739 efc_sm_event_name(evt));
740 /* if we didn't catch this in a state, send generic LS_RJT */
741 efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
742 ELS_RJT_UNAB, ELS_EXPL_NONE, 0);
745 case EFC_EVT_ABTS_RCVD: {
746 efc_log_debug(efc, "[%s] (%s) %s sending BA_ACC\n",
747 node->display_name, funcname,
748 efc_sm_event_name(evt));
750 /* sm: / send BA_ACC */
751 efc_send_bls_acc(node, cbdata->header->dma.virt);
756 efc_log_debug(node->efc, "[%s] %-20s %-20s not handled\n",
757 node->display_name, funcname,
758 efc_sm_event_name(evt));
763 efc_node_save_sparms(struct efc_node *node, void *payload)
765 memcpy(node->service_params, payload, sizeof(node->service_params));
769 efc_node_post_event(struct efc_node *node,
770 enum efc_sm_event evt, void *arg)
772 bool free_node = false;
776 efc_sm_post_event(&node->sm, evt, arg);
778 /* If our event call depth is one and
779 * we're not holding frames
780 * then we can dispatch any pending frames.
781 * We don't want to allow the efc_process_node_pending()
784 if (!node->hold_frames && node->evtdepth == 1)
785 efc_process_node_pending(node);
790 * Free the node object if so requested,
791 * and we're at an event call depth of zero
793 if (node->evtdepth == 0 && node->req_free)
801 efc_node_transition(struct efc_node *node,
802 void (*state)(struct efc_sm_ctx *,
803 enum efc_sm_event, void *), void *data)
805 struct efc_sm_ctx *ctx = &node->sm;
807 if (ctx->current_state == state) {
808 efc_node_post_event(node, EFC_EVT_REENTER, data);
810 efc_node_post_event(node, EFC_EVT_EXIT, data);
811 ctx->current_state = state;
812 efc_node_post_event(node, EFC_EVT_ENTER, data);
817 efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name)
819 memset(buf, 0, buf_len);
821 snprintf(buf, buf_len, "eui.%016llX", (unsigned long long)eui_name);
825 efc_node_get_wwpn(struct efc_node *node)
827 struct fc_els_flogi *sp =
828 (struct fc_els_flogi *)node->service_params;
830 return be64_to_cpu(sp->fl_wwpn);
834 efc_node_get_wwnn(struct efc_node *node)
836 struct fc_els_flogi *sp =
837 (struct fc_els_flogi *)node->service_params;
839 return be64_to_cpu(sp->fl_wwnn);
/*
 * efc_node_check_els_req() - helper for state handlers validating a received
 * ELS against an expected command.
 *
 * NOTE(review): only the signature is visible in this chunk; the body is
 * elided. Presumably it checks whether the event/frame matches @cmd and
 * otherwise defers to @efc_node_common_func (logging under @funcname) --
 * confirm against the full file before relying on this description.
 */
843 efc_node_check_els_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
844 u8 cmd, void (*efc_node_common_func)(const char *,
845 struct efc_sm_ctx *, enum efc_sm_event, void *),
846 const char *funcname)
/*
 * efc_node_check_ns_req() - helper for state handlers validating a received
 * name-server (CT) response against an expected command code.
 *
 * NOTE(review): only the signature is visible in this chunk; the body is
 * elided. Presumably mirrors efc_node_check_els_req() for 16-bit CT command
 * codes -- confirm against the full file before relying on this description.
 */
852 efc_node_check_ns_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
853 u16 cmd, void (*efc_node_common_func)(const char *,
854 struct efc_sm_ctx *, enum efc_sm_event, void *),
855 const char *funcname)
861 efc_els_io_list_empty(struct efc_node *node, struct list_head *list)
864 unsigned long flags = 0;
866 spin_lock_irqsave(&node->els_ios_lock, flags);
867 empty = list_empty(list);
868 spin_unlock_irqrestore(&node->els_ios_lock, flags);
873 efc_node_pause(struct efc_node *node,
874 void (*state)(struct efc_sm_ctx *,
875 enum efc_sm_event, void *))
878 node->nodedb_state = state;
879 efc_node_transition(node, __efc_node_paused, NULL);
883 __efc_node_paused(struct efc_sm_ctx *ctx,
884 enum efc_sm_event evt, void *arg)
886 struct efc_node *node = ctx->app;
888 efc_node_evt_set(ctx, evt, __func__);
893 * This state is entered when a state is "paused". When resumed, the
894 * node is transitioned to a previously saved state (node->ndoedb_state)
898 node_printf(node, "Paused\n");
901 case EFC_EVT_RESUME: {
902 void (*pf)(struct efc_sm_ctx *ctx,
903 enum efc_sm_event evt, void *arg);
905 pf = node->nodedb_state;
907 node->nodedb_state = NULL;
908 efc_node_transition(node, pf, NULL);
912 case EFC_EVT_DOMAIN_ATTACH_OK:
915 case EFC_EVT_SHUTDOWN:
916 node->req_free = true;
920 __efc_node_common(__func__, ctx, evt, arg);
925 efc_node_recv_els_frame(struct efc_node *node,
926 struct efc_hw_sequence *seq)
928 u32 prli_size = sizeof(struct fc_els_prli) + sizeof(struct fc_els_spp);
931 enum efc_sm_event evt;
934 {ELS_PLOGI, EFC_EVT_PLOGI_RCVD, sizeof(struct fc_els_flogi)},
935 {ELS_FLOGI, EFC_EVT_FLOGI_RCVD, sizeof(struct fc_els_flogi)},
936 {ELS_LOGO, EFC_EVT_LOGO_RCVD, sizeof(struct fc_els_ls_acc)},
937 {ELS_PRLI, EFC_EVT_PRLI_RCVD, prli_size},
938 {ELS_PRLO, EFC_EVT_PRLO_RCVD, prli_size},
939 {ELS_PDISC, EFC_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
940 {ELS_FDISC, EFC_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
941 {ELS_ADISC, EFC_EVT_ADISC_RCVD, sizeof(struct fc_els_adisc)},
942 {ELS_RSCN, EFC_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD},
943 {ELS_SCR, EFC_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD},
945 struct efc_node_cb cbdata;
946 u8 *buf = seq->payload->dma.virt;
947 enum efc_sm_event evt = EFC_EVT_ELS_RCVD;
950 memset(&cbdata, 0, sizeof(cbdata));
951 cbdata.header = seq->header;
952 cbdata.payload = seq->payload;
954 /* find a matching event for the ELS command */
955 for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) {
956 if (els_cmd_list[i].cmd == buf[0]) {
957 evt = els_cmd_list[i].evt;
962 efc_node_post_event(node, evt, &cbdata);
966 efc_node_recv_ct_frame(struct efc_node *node,
967 struct efc_hw_sequence *seq)
969 struct fc_ct_hdr *iu = seq->payload->dma.virt;
970 struct fc_frame_header *hdr = seq->header->dma.virt;
971 struct efc *efc = node->efc;
972 u16 gscmd = be16_to_cpu(iu->ct_cmd);
974 efc_log_err(efc, "[%s] Received cmd :%x sending CT_REJECT\n",
975 node->display_name, gscmd);
976 efc_send_ct_rsp(efc, node, be16_to_cpu(hdr->fh_ox_id), iu,
977 FC_FS_RJT, FC_FS_RJT_UNSUP, 0);
981 efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq)
983 struct efc_node_cb cbdata;
985 memset(&cbdata, 0, sizeof(cbdata));
986 cbdata.header = seq->header;
987 cbdata.payload = seq->payload;
989 efc_node_post_event(node, EFC_EVT_FCP_CMD_RCVD, &cbdata);
993 efc_process_node_pending(struct efc_node *node)
995 struct efc *efc = node->efc;
996 struct efc_hw_sequence *seq = NULL;
997 u32 pend_frames_processed = 0;
998 unsigned long flags = 0;
1001 /* need to check for hold frames condition after each frame
1002 * processed because any given frame could cause a transition
1003 * to a state that holds frames
1005 if (node->hold_frames)
1009 /* Get next frame/sequence */
1010 spin_lock_irqsave(&node->pend_frames_lock, flags);
1012 if (!list_empty(&node->pend_frames)) {
1013 seq = list_first_entry(&node->pend_frames,
1014 struct efc_hw_sequence, list_entry);
1015 list_del(&seq->list_entry);
1017 spin_unlock_irqrestore(&node->pend_frames_lock, flags);
1020 pend_frames_processed = node->pend_frames_processed;
1021 node->pend_frames_processed = 0;
1024 node->pend_frames_processed++;
1026 /* now dispatch frame(s) to dispatch function */
1027 efc_node_dispatch_frame(node, seq);
1028 efc->tt.hw_seq_free(efc, seq);
1031 if (pend_frames_processed != 0)
1032 efc_log_debug(efc, "%u node frames held and processed\n",
1033 pend_frames_processed);
1037 efc_scsi_sess_reg_complete(struct efc_node *node, u32 status)
1039 unsigned long flags = 0;
1040 enum efc_sm_event evt = EFC_EVT_NODE_SESS_REG_OK;
1041 struct efc *efc = node->efc;
1044 evt = EFC_EVT_NODE_SESS_REG_FAIL;
1046 spin_lock_irqsave(&efc->lock, flags);
1047 /* Notify the node to resume */
1048 efc_node_post_event(node, evt, NULL);
1049 spin_unlock_irqrestore(&efc->lock, flags);
1053 efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node)
1055 unsigned long flags = 0;
1057 spin_lock_irqsave(&efc->lock, flags);
1058 /* Notify the node to resume */
1059 efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
1060 spin_unlock_irqrestore(&efc->lock, flags);
1064 efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node)
1066 unsigned long flags = 0;
1068 spin_lock_irqsave(&efc->lock, flags);
1069 /* Notify the node to resume */
1070 efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
1071 spin_unlock_irqrestore(&efc->lock, flags);
1075 efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node)
1077 unsigned long flags = 0;
1079 spin_lock_irqsave(&efc->lock, flags);
1080 efc_node_post_event(node, EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL);
1081 spin_unlock_irqrestore(&efc->lock, flags);
1084 void efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg)
1086 struct efc *efc = node->efc;
1087 unsigned long flags = 0;
1089 spin_lock_irqsave(&efc->lock, flags);
1090 efc_node_post_event(node, evt, arg);
1091 spin_unlock_irqrestore(&efc->lock, flags);
1094 void efc_node_post_shutdown(struct efc_node *node, void *arg)
1096 unsigned long flags = 0;
1097 struct efc *efc = node->efc;
1099 spin_lock_irqsave(&efc->lock, flags);
1100 efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg);
1101 spin_unlock_irqrestore(&efc->lock, flags);