/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "../../include/linux/lnet/lib-lnet.h"

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
	lnet_test_peer_t *tp;
	struct list_head *el;
	struct list_head *next;
	struct list_head cull;

	LASSERT(the_lnet.ln_init);

	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
	if (threshold != 0) {
		/* Adding a new entry */
		LIBCFS_ALLOC(tp, sizeof(*tp));
		if (tp == NULL)
			return -ENOMEM;

		tp->tp_nid = nid;
		tp->tp_threshold = threshold;

		lnet_net_lock(0);
		list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
		lnet_net_unlock(0);
		return 0;
	}

	/* removing entries */
	INIT_LIST_HEAD(&cull);

	lnet_net_lock(0);

	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
		tp = list_entry(el, lnet_test_peer_t, tp_list);

		if (tp->tp_threshold == 0 ||	/* needs culling anyway */
		    nid == LNET_NID_ANY ||	/* removing all entries */
		    tp->tp_nid == nid) {	/* matched this one */
			list_del(&tp->tp_list);
			list_add(&tp->tp_list, &cull);
		}
	}

	lnet_net_unlock(0);

	while (!list_empty(&cull)) {
		tp = list_entry(cull.next, lnet_test_peer_t, tp_list);

		list_del(&tp->tp_list);
		LIBCFS_FREE(tp, sizeof(*tp));
	}
	return 0;
}
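/*
 * Usage sketch (illustrative only): lnet_fail_nid() is normally reached
 * from userspace via LNetCtl()'s IOC_LIBCFS_FAIL_NID handler in api-ni.c.
 * For a hypothetical in-kernel caller, arming and disarming a failure
 * rule would look roughly like this:
 *
 *	lnet_nid_t nid = libcfs_str2nid("192.168.0.1@tcp");
 *
 *	rc = lnet_fail_nid(nid, 3);		// drop the next 3 messages
 *	...
 *	rc = lnet_fail_nid(nid, 0);		// threshold 0: remove rule
 *	rc = lnet_fail_nid(LNET_NID_ANY, 0);	// remove all rules
 */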
static int
fail_peer(lnet_nid_t nid, int outgoing)
{
	lnet_test_peer_t *tp;
	struct list_head *el;
	struct list_head *next;
	struct list_head cull;
	int fail = 0;

	INIT_LIST_HEAD(&cull);

	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
	lnet_net_lock(0);

	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
		tp = list_entry(el, lnet_test_peer_t, tp_list);

		if (tp->tp_threshold == 0) {
			/* zombie entry */
			if (outgoing) {
				/* only cull zombies on outgoing tests,
				 * since we may be at interrupt priority on
				 * incoming messages. */
				list_del(&tp->tp_list);
				list_add(&tp->tp_list, &cull);
			}
			continue;
		}

		if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
		    nid == tp->tp_nid) {	/* fail this peer */
			fail = 1;

			if (tp->tp_threshold != LNET_MD_THRESH_INF) {
				tp->tp_threshold--;
				if (outgoing &&
				    tp->tp_threshold == 0) {
					/* see above */
					list_del(&tp->tp_list);
					list_add(&tp->tp_list, &cull);
				}
			}
			break;
		}
	}

	lnet_net_unlock(0);

	while (!list_empty(&cull)) {
		tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
		list_del(&tp->tp_list);

		LIBCFS_FREE(tp, sizeof(*tp));
	}

	return fail;
}
unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
	unsigned int nob = 0;

	while (niov-- > 0)
		nob += (iov++)->iov_len;

	return nob;
}
EXPORT_SYMBOL(lnet_iov_nob);
void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
		  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
		  unsigned int nob)
{
	/* NB diov, siov are READ-ONLY */
	unsigned int this_nob;

	if (nob == 0)
		return;

	/* skip complete frags before 'doffset' */
	LASSERT(ndiov > 0);
	while (doffset >= diov->iov_len) {
		doffset -= diov->iov_len;
		diov++;
		ndiov--;
		LASSERT(ndiov > 0);
	}

	/* skip complete frags before 'soffset' */
	LASSERT(nsiov > 0);
	while (soffset >= siov->iov_len) {
		soffset -= siov->iov_len;
		siov++;
		nsiov--;
		LASSERT(nsiov > 0);
	}

	do {
		LASSERT(ndiov > 0);
		LASSERT(nsiov > 0);
		this_nob = min(diov->iov_len - doffset,
			       siov->iov_len - soffset);
		this_nob = min(this_nob, nob);

		memcpy((char *)diov->iov_base + doffset,
		       (char *)siov->iov_base + soffset, this_nob);
		nob -= this_nob;

		if (diov->iov_len > doffset + this_nob) {
			doffset += this_nob;
		} else {
			diov++;
			ndiov--;
			doffset = 0;
		}

		if (siov->iov_len > soffset + this_nob) {
			soffset += this_nob;
		} else {
			siov++;
			nsiov--;
			soffset = 0;
		}
	} while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);
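/*
 * Usage sketch (illustrative, with made-up buffers): copy 'nob' bytes
 * from offset 'soffset' of a source fragment list into offset 'doffset'
 * of a destination fragment list.  The helper walks both lists, so the
 * fragments need not line up:
 *
 *	struct kvec src[2] = {
 *		{ .iov_base = buf_a, .iov_len = 100 },
 *		{ .iov_base = buf_b, .iov_len = 100 },
 *	};
 *	struct kvec dst[1] = {
 *		{ .iov_base = buf_c, .iov_len = 200 },
 *	};
 *
 *	// copies src bytes [50..149] into dst bytes [0..99]
 *	lnet_copy_iov2iov(1, dst, 0, 2, src, 50, 100);
 */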
int
lnet_extract_iov(int dst_niov, struct kvec *dst,
		 int src_niov, struct kvec *src,
		 unsigned int offset, unsigned int len)
{
	/* Initialise 'dst' to the subset of 'src' starting at 'offset',
	 * for exactly 'len' bytes, and return the number of entries.
	 * NB not destructive to 'src' */
	unsigned int frag_len;
	unsigned int niov;

	if (len == 0)			/* no data => */
		return 0;		/* no frags */

	LASSERT(src_niov > 0);
	while (offset >= src->iov_len) {	/* skip initial frags */
		offset -= src->iov_len;
		src_niov--;
		src++;
		LASSERT(src_niov > 0);
	}

	niov = 1;
	for (;;) {
		LASSERT(src_niov > 0);
		LASSERT((int)niov <= dst_niov);

		frag_len = src->iov_len - offset;
		dst->iov_base = ((char *)src->iov_base) + offset;

		if (len <= frag_len) {
			dst->iov_len = len;
			return niov;
		}

		dst->iov_len = frag_len;

		len -= frag_len;
		dst++;
		src++;
		niov++;
		src_niov--;
		offset = 0;
	}
}
EXPORT_SYMBOL(lnet_extract_iov);
unsigned int
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
	unsigned int nob = 0;

	while (niov-- > 0)
		nob += (kiov++)->kiov_len;

	return nob;
}
EXPORT_SYMBOL(lnet_kiov_nob);
void
lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
		    unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
		    unsigned int nob)
{
	/* NB diov, siov are READ-ONLY */
	unsigned int this_nob;
	char *daddr = NULL;
	char *saddr = NULL;

	if (nob == 0)
		return;

	LASSERT(!in_interrupt());

	LASSERT(ndiov > 0);
	while (doffset >= diov->kiov_len) {
		doffset -= diov->kiov_len;
		diov++;
		ndiov--;
		LASSERT(ndiov > 0);
	}

	LASSERT(nsiov > 0);
	while (soffset >= siov->kiov_len) {
		soffset -= siov->kiov_len;
		siov++;
		nsiov--;
		LASSERT(nsiov > 0);
	}

	do {
		LASSERT(ndiov > 0);
		LASSERT(nsiov > 0);
		this_nob = min(diov->kiov_len - doffset,
			       siov->kiov_len - soffset);
		this_nob = min(this_nob, nob);

		if (daddr == NULL)
			daddr = ((char *)kmap(diov->kiov_page)) +
				diov->kiov_offset + doffset;
		if (saddr == NULL)
			saddr = ((char *)kmap(siov->kiov_page)) +
				siov->kiov_offset + soffset;

		/* Vanishing risk of kmap deadlock when mapping 2 pages.
		 * However in practice at least one of the kiovs will be mapped
		 * kernel pages and the map/unmap will be NOOPs */

		memcpy(daddr, saddr, this_nob);
		nob -= this_nob;

		if (diov->kiov_len > doffset + this_nob) {
			daddr += this_nob;
			doffset += this_nob;
		} else {
			kunmap(diov->kiov_page);
			daddr = NULL;
			diov++;
			ndiov--;
			doffset = 0;
		}

		if (siov->kiov_len > soffset + this_nob) {
			saddr += this_nob;
			soffset += this_nob;
		} else {
			kunmap(siov->kiov_page);
			saddr = NULL;
			siov++;
			nsiov--;
			soffset = 0;
		}
	} while (nob > 0);

	if (daddr != NULL)
		kunmap(diov->kiov_page);
	if (saddr != NULL)
		kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
void
lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
		   unsigned int nkiov, lnet_kiov_t *kiov,
		   unsigned int kiovoffset, unsigned int nob)
{
	/* NB iov, kiov are READ-ONLY */
	unsigned int this_nob;
	char *addr = NULL;

	if (nob == 0)
		return;

	LASSERT(!in_interrupt());

	LASSERT(niov > 0);
	while (iovoffset >= iov->iov_len) {
		iovoffset -= iov->iov_len;
		iov++;
		niov--;
		LASSERT(niov > 0);
	}

	LASSERT(nkiov > 0);
	while (kiovoffset >= kiov->kiov_len) {
		kiovoffset -= kiov->kiov_len;
		kiov++;
		nkiov--;
		LASSERT(nkiov > 0);
	}

	do {
		LASSERT(niov > 0);
		LASSERT(nkiov > 0);
		this_nob = min(iov->iov_len - iovoffset,
			       (__kernel_size_t) kiov->kiov_len - kiovoffset);
		this_nob = min(this_nob, nob);

		if (addr == NULL)
			addr = ((char *)kmap(kiov->kiov_page)) +
				kiov->kiov_offset + kiovoffset;

		memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
		nob -= this_nob;

		if (iov->iov_len > iovoffset + this_nob) {
			iovoffset += this_nob;
		} else {
			iov++;
			niov--;
			iovoffset = 0;
		}

		if (kiov->kiov_len > kiovoffset + this_nob) {
			addr += this_nob;
			kiovoffset += this_nob;
		} else {
			kunmap(kiov->kiov_page);
			addr = NULL;
			kiov++;
			nkiov--;
			kiovoffset = 0;
		}
	} while (nob > 0);

	if (addr != NULL)
		kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);
void
lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
		   unsigned int kiovoffset, unsigned int niov,
		   struct kvec *iov, unsigned int iovoffset,
		   unsigned int nob)
{
	/* NB kiov, iov are READ-ONLY */
	unsigned int this_nob;
	char *addr = NULL;

	if (nob == 0)
		return;

	LASSERT(!in_interrupt());

	LASSERT(nkiov > 0);
	while (kiovoffset >= kiov->kiov_len) {
		kiovoffset -= kiov->kiov_len;
		kiov++;
		nkiov--;
		LASSERT(nkiov > 0);
	}

	LASSERT(niov > 0);
	while (iovoffset >= iov->iov_len) {
		iovoffset -= iov->iov_len;
		iov++;
		niov--;
		LASSERT(niov > 0);
	}

	do {
		LASSERT(nkiov > 0);
		LASSERT(niov > 0);
		this_nob = min((__kernel_size_t) kiov->kiov_len - kiovoffset,
			       iov->iov_len - iovoffset);
		this_nob = min(this_nob, nob);

		if (addr == NULL)
			addr = ((char *)kmap(kiov->kiov_page)) +
				kiov->kiov_offset + kiovoffset;

		memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
		nob -= this_nob;

		if (kiov->kiov_len > kiovoffset + this_nob) {
			addr += this_nob;
			kiovoffset += this_nob;
		} else {
			kunmap(kiov->kiov_page);
			addr = NULL;
			kiov++;
			nkiov--;
			kiovoffset = 0;
		}

		if (iov->iov_len > iovoffset + this_nob) {
			iovoffset += this_nob;
		} else {
			iov++;
			niov--;
			iovoffset = 0;
		}
	} while (nob > 0);

	if (addr != NULL)
		kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);
int
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
		  int src_niov, lnet_kiov_t *src,
		  unsigned int offset, unsigned int len)
{
	/* Initialise 'dst' to the subset of 'src' starting at 'offset',
	 * for exactly 'len' bytes, and return the number of entries.
	 * NB not destructive to 'src' */
	unsigned int frag_len;
	unsigned int niov;

	if (len == 0)			/* no data => */
		return 0;		/* no frags */

	LASSERT(src_niov > 0);
	while (offset >= src->kiov_len) {	/* skip initial frags */
		offset -= src->kiov_len;
		src_niov--;
		src++;
		LASSERT(src_niov > 0);
	}

	niov = 1;
	for (;;) {
		LASSERT(src_niov > 0);
		LASSERT((int)niov <= dst_niov);

		frag_len = src->kiov_len - offset;
		dst->kiov_page = src->kiov_page;
		dst->kiov_offset = src->kiov_offset + offset;

		if (len <= frag_len) {
			dst->kiov_len = len;
			LASSERT(dst->kiov_offset + dst->kiov_len
					<= PAGE_CACHE_SIZE);
			return niov;
		}

		dst->kiov_len = frag_len;
		LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);

		len -= frag_len;
		dst++;
		src++;
		niov++;
		src_niov--;
		offset = 0;
	}
}
EXPORT_SYMBOL(lnet_extract_kiov);
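/*
 * Usage sketch (illustrative): an LND that must hand the lower layer at
 * most LNET_MAX_IOV fragments describing bytes [offset, offset + len)
 * of a message payload can carve a view out of the message's fragment
 * list without copying:
 *
 *	struct kvec view[LNET_MAX_IOV];
 *	int nv;
 *
 *	nv = lnet_extract_iov(LNET_MAX_IOV, view,
 *			      msg->msg_niov, msg->msg_iov,
 *			      msg->msg_offset, len);
 *	// 'view' now aliases the payload; msg->msg_iov is untouched
 *
 * lnet_extract_kiov() does the same for page-based (kiov) fragment
 * lists.
 */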
void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
	     unsigned int offset, unsigned int mlen, unsigned int rlen)
{
	unsigned int niov = 0;
	struct kvec *iov = NULL;
	lnet_kiov_t *kiov = NULL;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(mlen == 0 || msg != NULL);

	if (msg != NULL) {
		LASSERT(msg->msg_receiving);
		LASSERT(!msg->msg_sending);
		LASSERT(rlen == msg->msg_len);
		LASSERT(mlen <= msg->msg_len);
		LASSERT(msg->msg_offset == offset);
		LASSERT(msg->msg_wanted == mlen);

		msg->msg_receiving = 0;

		if (mlen != 0) {
			niov = msg->msg_niov;
			iov = msg->msg_iov;
			kiov = msg->msg_kiov;

			LASSERT(niov > 0);
			LASSERT((iov == NULL) != (kiov == NULL));
		}
	}

	rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
				    niov, iov, kiov, offset, mlen, rlen);
	if (rc < 0)
		lnet_finalize(ni, msg, rc);
}
static void
lnet_setpayloadbuffer(lnet_msg_t *msg)
{
	lnet_libmd_t *md = msg->msg_md;

	LASSERT(msg->msg_len > 0);
	LASSERT(!msg->msg_routing);
	LASSERT(md != NULL);
	LASSERT(msg->msg_niov == 0);
	LASSERT(msg->msg_iov == NULL);
	LASSERT(msg->msg_kiov == NULL);

	msg->msg_niov = md->md_niov;
	if ((md->md_options & LNET_MD_KIOV) != 0)
		msg->msg_kiov = md->md_iov.kiov;
	else
		msg->msg_iov = md->md_iov.iov;
}
static void
lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
	       unsigned int offset, unsigned int len)
{
	msg->msg_type = type;
	msg->msg_target = target;
	msg->msg_len = len;
	msg->msg_offset = offset;

	if (len != 0)
		lnet_setpayloadbuffer(msg);

	memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
	msg->msg_hdr.type = cpu_to_le32(type);
	msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
	msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
	/* src_nid will be set later */
	msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
	msg->msg_hdr.payload_length = cpu_to_le32(len);
}
static void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
	void *priv = msg->msg_private;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
		(msg->msg_txcredit && msg->msg_peertxcredit));

	rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
	if (rc < 0)
		lnet_finalize(ni, msg, rc);
}
static int
lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
{
	int rc;

	LASSERT(!msg->msg_sending);
	LASSERT(msg->msg_receiving);
	LASSERT(!msg->msg_rx_ready_delay);
	LASSERT(ni->ni_lnd->lnd_eager_recv != NULL);

	msg->msg_rx_ready_delay = 1;
	rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
					  &msg->msg_private);
	if (rc != 0) {
		CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
		       libcfs_nid2str(msg->msg_rxpeer->lp_nid),
		       libcfs_id2str(msg->msg_target), rc);
		LASSERT(rc < 0); /* required by my callers */
	}

	return rc;
}
/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
static void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
	unsigned long last_alive = 0;

	LASSERT(lnet_peer_aliveness_enabled(lp));
	LASSERT(ni->ni_lnd->lnd_query != NULL);

	lnet_net_unlock(lp->lp_cpt);
	(ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
	lnet_net_lock(lp->lp_cpt);

	lp->lp_last_query = cfs_time_current();

	if (last_alive != 0) /* NI has updated timestamp */
		lp->lp_last_alive = last_alive;
}
/* NB: always called with lnet_net_lock held */
static inline int
lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
{
	int alive;
	unsigned long deadline;

	LASSERT(lnet_peer_aliveness_enabled(lp));

	/* Trust lnet_notify() if it has more recent aliveness news, but
	 * ignore the initial assumed death (see lnet_peers_start_down()).
	 */
	if (!lp->lp_alive && lp->lp_alive_count > 0 &&
	    cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
		return 0;

	deadline = cfs_time_add(lp->lp_last_alive,
				cfs_time_seconds(lp->lp_ni->ni_peertimeout));
	alive = cfs_time_after(deadline, now);

	/* Update obsolete lp_alive except for routers assumed to be dead
	 * initially, because router checker would update aliveness in this
	 * case, and moreover lp_last_alive at peer creation is assumed.
	 */
	if (alive && !lp->lp_alive &&
	    !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
		lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);

	return alive;
}
/* NB: returns 1 when alive, 0 when dead, negative when error;
 * may drop the lnet_net_lock */
static int
lnet_peer_alive_locked(lnet_peer_t *lp)
{
	unsigned long now = cfs_time_current();

	if (!lnet_peer_aliveness_enabled(lp))
		return -ENODEV;

	if (lnet_peer_is_alive(lp, now))
		return 1;

	/* Peer appears dead, but we should avoid frequent NI queries (at
	 * most once per lnet_queryinterval seconds). */
	if (lp->lp_last_query != 0) {
		static const int lnet_queryinterval = 1;

		unsigned long next_query =
			cfs_time_add(lp->lp_last_query,
				     cfs_time_seconds(lnet_queryinterval));

		if (time_before(now, next_query)) {
			if (lp->lp_alive)
				CWARN("Unexpected aliveness of peer %s: %d < %d (%d/%d)\n",
				      libcfs_nid2str(lp->lp_nid),
				      (int)now, (int)next_query,
				      lnet_queryinterval,
				      lp->lp_ni->ni_peertimeout);
			return 0;
		}
	}

	/* query NI for latest aliveness news */
	lnet_ni_query_locked(lp->lp_ni, lp);

	if (lnet_peer_is_alive(lp, now))
		return 1;

	lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
	return 0;
}
/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *	  lnet_send() is going to lnet_net_unlock immediately after this, so
 *	  it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval 0 If \a msg sent or OK to send.
 * \retval EAGAIN If \a msg blocked for credit.
 * \retval EHOSTUNREACH If the next hop of the message appears dead.
 * \retval ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(lnet_msg_t *msg, int do_send)
{
	lnet_peer_t *lp = msg->msg_txpeer;
	lnet_ni_t *ni = lp->lp_ni;
	int cpt = msg->msg_tx_cpt;
	struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];

	/* non-lnet_send() callers have checked before */
	LASSERT(!do_send || msg->msg_tx_delayed);
	LASSERT(!msg->msg_receiving);
	LASSERT(msg->msg_tx_committed);

	/* NB 'lp' is always the next hop */
	if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
	    lnet_peer_alive_locked(lp) == 0) {
		the_lnet.ln_counters[cpt]->drop_count++;
		the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
		lnet_net_unlock(cpt);

		CNETERR("Dropping message for %s: peer not alive\n",
			libcfs_id2str(msg->msg_target));
		if (do_send)
			lnet_finalize(ni, msg, -EHOSTUNREACH);

		lnet_net_lock(cpt);
		return EHOSTUNREACH;
	}

	if (msg->msg_md != NULL &&
	    (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
		lnet_net_unlock(cpt);

		CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
			libcfs_id2str(msg->msg_target));
		if (do_send)
			lnet_finalize(ni, msg, -ECANCELED);

		lnet_net_lock(cpt);
		return ECANCELED;
	}

	if (!msg->msg_peertxcredit) {
		LASSERT((lp->lp_txcredits < 0) ==
			!list_empty(&lp->lp_txq));

		msg->msg_peertxcredit = 1;
		lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
		lp->lp_txcredits--;

		if (lp->lp_txcredits < lp->lp_mintxcredits)
			lp->lp_mintxcredits = lp->lp_txcredits;

		if (lp->lp_txcredits < 0) {
			msg->msg_tx_delayed = 1;
			list_add_tail(&msg->msg_list, &lp->lp_txq);
			return EAGAIN;
		}
	}

	if (!msg->msg_txcredit) {
		LASSERT((tq->tq_credits < 0) ==
			!list_empty(&tq->tq_delayed));

		msg->msg_txcredit = 1;
		tq->tq_credits--;

		if (tq->tq_credits < tq->tq_credits_min)
			tq->tq_credits_min = tq->tq_credits;

		if (tq->tq_credits < 0) {
			msg->msg_tx_delayed = 1;
			list_add_tail(&msg->msg_list, &tq->tq_delayed);
			return EAGAIN;
		}
	}

	if (do_send) {
		lnet_net_unlock(cpt);
		lnet_ni_send(ni, msg);
		lnet_net_lock(cpt);
	}
	return 0;
}
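/*
 * Worked example of the credit accounting above (the numbers are
 * illustrative): with 8 peer tx credits and 64 NI tx credits, the 9th
 * concurrent send to one peer drives lp_txcredits to -1, so the message
 * is queued on lp_txq and lnet_post_send_locked() returns EAGAIN; when
 * an earlier send completes, lnet_return_tx_credits_locked() increments
 * the count and resubmits the queued message with do_send == 1.
 */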
static lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
	lnet_rtrbufpool_t *rbp;
	int cpt;

	LASSERT(msg->msg_rx_committed);

	cpt = msg->msg_rx_cpt;
	rbp = &the_lnet.ln_rtrpools[cpt][0];

	LASSERT(msg->msg_len <= LNET_MTU);
	while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
		rbp++;
		LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
	}

	return rbp;
}
static int
lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
	/* lnet_parse is going to lnet_net_unlock immediately after this, so it
	 * sets do_recv FALSE and I don't do the unlock/send/lock bit. I
	 * return EAGAIN if msg blocked and 0 if received or OK to receive */
	lnet_peer_t *lp = msg->msg_rxpeer;
	lnet_rtrbufpool_t *rbp;
	lnet_rtrbuf_t *rb;

	LASSERT(msg->msg_iov == NULL);
	LASSERT(msg->msg_kiov == NULL);
	LASSERT(msg->msg_niov == 0);
	LASSERT(msg->msg_routing);
	LASSERT(msg->msg_receiving);
	LASSERT(!msg->msg_sending);

	/* non-lnet_parse callers only receive delayed messages */
	LASSERT(!do_recv || msg->msg_rx_delayed);

	if (!msg->msg_peerrtrcredit) {
		LASSERT((lp->lp_rtrcredits < 0) ==
			!list_empty(&lp->lp_rtrq));

		msg->msg_peerrtrcredit = 1;
		lp->lp_rtrcredits--;
		if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
			lp->lp_minrtrcredits = lp->lp_rtrcredits;

		if (lp->lp_rtrcredits < 0) {
			/* must have checked eager_recv before here */
			LASSERT(msg->msg_rx_ready_delay);
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list, &lp->lp_rtrq);
			return EAGAIN;
		}
	}

	rbp = lnet_msg2bufpool(msg);

	if (!msg->msg_rtrcredit) {
		LASSERT((rbp->rbp_credits < 0) ==
			!list_empty(&rbp->rbp_msgs));

		msg->msg_rtrcredit = 1;
		rbp->rbp_credits--;
		if (rbp->rbp_credits < rbp->rbp_mincredits)
			rbp->rbp_mincredits = rbp->rbp_credits;

		if (rbp->rbp_credits < 0) {
			/* must have checked eager_recv before here */
			LASSERT(msg->msg_rx_ready_delay);
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
			return EAGAIN;
		}
	}

	LASSERT(!list_empty(&rbp->rbp_bufs));
	rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
	list_del(&rb->rb_list);

	msg->msg_niov = rbp->rbp_npages;
	msg->msg_kiov = &rb->rb_kiov[0];

	if (do_recv) {
		int cpt = msg->msg_rx_cpt;

		lnet_net_unlock(cpt);
		lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
			     0, msg->msg_len, msg->msg_len);
		lnet_net_lock(cpt);
	}
	return 0;
}
void
lnet_return_tx_credits_locked(lnet_msg_t *msg)
{
	lnet_peer_t *txpeer = msg->msg_txpeer;
	lnet_msg_t *msg2;

	if (msg->msg_txcredit) {
		struct lnet_ni *ni = txpeer->lp_ni;
		struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

		/* give back NI txcredits */
		msg->msg_txcredit = 0;

		LASSERT((tq->tq_credits < 0) ==
			!list_empty(&tq->tq_delayed));

		tq->tq_credits++;
		if (tq->tq_credits <= 0) {
			msg2 = list_entry(tq->tq_delayed.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			LASSERT(msg2->msg_txpeer->lp_ni == ni);
			LASSERT(msg2->msg_tx_delayed);

			(void) lnet_post_send_locked(msg2, 1);
		}
	}

	if (msg->msg_peertxcredit) {
		/* give back peer txcredits */
		msg->msg_peertxcredit = 0;

		LASSERT((txpeer->lp_txcredits < 0) ==
			!list_empty(&txpeer->lp_txq));

		txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
		LASSERT(txpeer->lp_txqnob >= 0);

		txpeer->lp_txcredits++;
		if (txpeer->lp_txcredits <= 0) {
			msg2 = list_entry(txpeer->lp_txq.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			LASSERT(msg2->msg_txpeer == txpeer);
			LASSERT(msg2->msg_tx_delayed);

			(void) lnet_post_send_locked(msg2, 1);
		}
	}

	if (txpeer != NULL) {
		msg->msg_txpeer = NULL;
		lnet_peer_decref_locked(txpeer);
	}
}
void
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
	lnet_peer_t *rxpeer = msg->msg_rxpeer;
	lnet_msg_t *msg2;

	if (msg->msg_rtrcredit) {
		/* give back global router credits */
		lnet_rtrbuf_t *rb;
		lnet_rtrbufpool_t *rbp;

		/* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
		 * there until it gets one allocated, or aborts the wait
		 * itself */
		LASSERT(msg->msg_kiov != NULL);

		rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
		rbp = rb->rb_pool;
		LASSERT(rbp == lnet_msg2bufpool(msg));

		msg->msg_kiov = NULL;
		msg->msg_rtrcredit = 0;

		LASSERT((rbp->rbp_credits < 0) ==
			!list_empty(&rbp->rbp_msgs));
		LASSERT((rbp->rbp_credits > 0) ==
			!list_empty(&rbp->rbp_bufs));

		list_add(&rb->rb_list, &rbp->rbp_bufs);
		rbp->rbp_credits++;
		if (rbp->rbp_credits <= 0) {
			msg2 = list_entry(rbp->rbp_msgs.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			(void) lnet_post_routed_recv_locked(msg2, 1);
		}
	}

	if (msg->msg_peerrtrcredit) {
		/* give back peer router credits */
		msg->msg_peerrtrcredit = 0;

		LASSERT((rxpeer->lp_rtrcredits < 0) ==
			!list_empty(&rxpeer->lp_rtrq));

		rxpeer->lp_rtrcredits++;
		if (rxpeer->lp_rtrcredits <= 0) {
			msg2 = list_entry(rxpeer->lp_rtrq.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			(void) lnet_post_routed_recv_locked(msg2, 1);
		}
	}

	if (rxpeer != NULL) {
		msg->msg_rxpeer = NULL;
		lnet_peer_decref_locked(rxpeer);
	}
}
static int
lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
{
	lnet_peer_t *p1 = r1->lr_gateway;
	lnet_peer_t *p2 = r2->lr_gateway;

	if (r1->lr_priority < r2->lr_priority)
		return 1;

	if (r1->lr_priority > r2->lr_priority)
		return -1;

	if (r1->lr_hops < r2->lr_hops)
		return 1;

	if (r1->lr_hops > r2->lr_hops)
		return -1;

	if (p1->lp_txqnob < p2->lp_txqnob)
		return 1;

	if (p1->lp_txqnob > p2->lp_txqnob)
		return -1;

	if (p1->lp_txcredits > p2->lp_txcredits)
		return 1;

	if (p1->lp_txcredits < p2->lp_txcredits)
		return -1;

	if (r1->lr_seq - r2->lr_seq <= 0)
		return 1;

	return -1;
}
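/*
 * Tie-break summary for the comparison above (r1 is preferred on +1):
 * lower priority value, then fewer hops, then less data queued on the
 * gateway (lp_txqnob), then more available tx credits, then the older
 * round-robin sequence number.  For example, two equal-priority,
 * equal-hop routes are split purely on gateway load.
 */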
static lnet_peer_t *
lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
{
	lnet_remotenet_t *rnet;
	lnet_route_t *rtr;
	lnet_route_t *rtr_best;
	lnet_route_t *rtr_last;
	struct lnet_peer *lp_best;
	struct lnet_peer *lp;
	int rc;

	/* If @rtr_nid is not LNET_NID_ANY, return the gateway with
	 * rtr_nid nid, otherwise find the best gateway I can use */

	rnet = lnet_find_net_locked(LNET_NIDNET(target));
	if (rnet == NULL)
		return NULL;

	lp_best = NULL;
	rtr_best = rtr_last = NULL;
	list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) {
		lp = rtr->lr_gateway;

		if (!lp->lp_alive ||	/* gateway is down */
		    ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
		     rtr->lr_downis != 0)) /* NI to target is down */
			continue;

		if (ni != NULL && lp->lp_ni != ni)
			continue;

		if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
			return lp;

		if (lp_best == NULL) {
			rtr_best = rtr_last = rtr;
			lp_best = lp;
			continue;
		}

		/* no protection on below fields, but it's harmless */
		if (rtr_last->lr_seq - rtr->lr_seq < 0)
			rtr_last = rtr;

		rc = lnet_compare_routes(rtr, rtr_best);
		if (rc < 0)
			continue;

		rtr_best = rtr;
		lp_best = lp;
	}

	/* set sequence number on the best router to the latest sequence + 1
	 * so we can round-robin all routers, it's racy and inaccurate but
	 * harmless and functional */
	if (rtr_best != NULL)
		rtr_best->lr_seq = rtr_last->lr_seq + 1;
	return lp_best;
}
int
lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
{
	lnet_nid_t dst_nid = msg->msg_target.nid;
	struct lnet_ni *src_ni;
	struct lnet_ni *local_ni;
	struct lnet_peer *lp;
	int cpt;
	int cpt2;
	int rc;

	/* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
	 * but we might want to use pre-determined router for ACK/REPLY
	 * in the future */
	/* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
	LASSERT(msg->msg_txpeer == NULL);
	LASSERT(!msg->msg_sending);
	LASSERT(!msg->msg_target_is_router);
	LASSERT(!msg->msg_receiving);

	msg->msg_sending = 1;

	LASSERT(!msg->msg_tx_committed);
	cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid);
 again:
	lnet_net_lock(cpt);

	if (the_lnet.ln_shutdown) {
		lnet_net_unlock(cpt);
		return -ESHUTDOWN;
	}

	if (src_nid == LNET_NID_ANY) {
		src_ni = NULL;
	} else {
		src_ni = lnet_nid2ni_locked(src_nid, cpt);
		if (src_ni == NULL) {
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
				      libcfs_nid2str(dst_nid),
				      libcfs_nid2str(src_nid));
			return -EINVAL;
		}
		LASSERT(!msg->msg_routing);
	}

	/* Is this for someone on a local network? */
	local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);

	if (local_ni != NULL) {
		if (src_ni == NULL) {
			src_ni = local_ni;
			src_nid = src_ni->ni_nid;
		} else if (src_ni == local_ni) {
			lnet_ni_decref_locked(local_ni, cpt);
		} else {
			lnet_ni_decref_locked(local_ni, cpt);
			lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("No route to %s from %s\n",
				      libcfs_nid2str(dst_nid),
				      libcfs_nid2str(src_nid));
			return -EINVAL;
		}

		LASSERT(src_nid != LNET_NID_ANY);
		lnet_msg_commit(msg, cpt);

		if (!msg->msg_routing)
			msg->msg_hdr.src_nid = cpu_to_le64(src_nid);

		if (src_ni == the_lnet.ln_loni) {
			/* No send credit hassles with LOLND */
			lnet_net_unlock(cpt);
			lnet_ni_send(src_ni, msg);

			lnet_net_lock(cpt);
			lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);
			return 0;
		}

		rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
		/* lp has ref on src_ni; lose mine */
		lnet_ni_decref_locked(src_ni, cpt);
		if (rc != 0) {
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("Error %d finding peer %s\n", rc,
				      libcfs_nid2str(dst_nid));
			/* ENOMEM or shutting down */
			return rc;
		}
		LASSERT(lp->lp_ni == src_ni);
	} else {
		/* sending to a remote network */
		lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
		if (lp == NULL) {
			if (src_ni != NULL)
				lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);

			LCONSOLE_WARN("No route to %s via %s (all routers down)\n",
				      libcfs_id2str(msg->msg_target),
				      libcfs_nid2str(src_nid));
			return -EHOSTUNREACH;
		}

		/* rtr_nid is LNET_NID_ANY or NID of pre-determined router,
		 * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't
		 * pre-determined router, this can happen if router table
		 * was changed when we release the lock */
		if (rtr_nid != lp->lp_nid) {
			cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
			if (cpt2 != cpt) {
				if (src_ni != NULL)
					lnet_ni_decref_locked(src_ni, cpt);
				lnet_net_unlock(cpt);

				rtr_nid = lp->lp_nid;
				cpt = cpt2;
				goto again;
			}
		}

		CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
		       libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
		       lnet_msgtyp2str(msg->msg_type), msg->msg_len);

		if (src_ni == NULL) {
			src_ni = lp->lp_ni;
			src_nid = src_ni->ni_nid;
		} else {
			LASSERT(src_ni == lp->lp_ni);
			lnet_ni_decref_locked(src_ni, cpt);
		}

		lnet_peer_addref_locked(lp);

		LASSERT(src_nid != LNET_NID_ANY);
		lnet_msg_commit(msg, cpt);

		if (!msg->msg_routing) {
			/* I'm the source and now I know which NI to send on */
			msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
		}

		msg->msg_target_is_router = 1;
		msg->msg_target.nid = lp->lp_nid;
		msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
	}

	/* 'lp' is our best choice of peer */

	LASSERT(!msg->msg_peertxcredit);
	LASSERT(!msg->msg_txcredit);
	LASSERT(msg->msg_txpeer == NULL);

	msg->msg_txpeer = lp; /* msg takes my ref on lp */

	rc = lnet_post_send_locked(msg, 0);
	lnet_net_unlock(cpt);

	if (rc == EHOSTUNREACH || rc == ECANCELED)
		return -rc;

	if (rc == 0)
		lnet_ni_send(src_ni, msg);

	return 0; /* rc == 0 or EAGAIN */
}
static void
lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
{
	lnet_net_lock(cpt);
	the_lnet.ln_counters[cpt]->drop_count++;
	the_lnet.ln_counters[cpt]->drop_length += nob;
	lnet_net_unlock(cpt);

	lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
}
static void
lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;

	if (msg->msg_wanted != 0)
		lnet_setpayloadbuffer(msg);

	lnet_build_msg_event(msg, LNET_EVENT_PUT);

	/* Must I ACK? If so I'll grab the ack_wmd out of the header and put
	 * it back into the ACK during lnet_finalize() */
	msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
			(msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);

	lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
		     msg->msg_offset, msg->msg_wanted, hdr->payload_length);
}
static int
lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;
	struct lnet_match_info info;
	int rc;

	/* Convert put fields to host byte order */
	hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
	hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
	hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);

	info.mi_id.nid = hdr->src_nid;
	info.mi_id.pid = hdr->src_pid;
	info.mi_opc = LNET_MD_OP_PUT;
	info.mi_portal = hdr->msg.put.ptl_index;
	info.mi_rlength = hdr->payload_length;
	info.mi_roffset = hdr->msg.put.offset;
	info.mi_mbits = hdr->msg.put.match_bits;

	msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL;

 again:
	rc = lnet_ptl_match_md(&info, msg);

	switch (rc) {
	default:
		LBUG();

	case LNET_MATCHMD_OK:
		lnet_recv_put(ni, msg);
		return 0;

	case LNET_MATCHMD_NONE:
		if (msg->msg_rx_delayed) /* attached on delayed list */
			return 0;

		rc = lnet_ni_eager_recv(ni, msg);
		if (rc == 0)
			goto again;
		/* fall through */

	case LNET_MATCHMD_DROP:
		CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n",
			libcfs_id2str(info.mi_id), info.mi_portal,
			info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);

		return ENOENT; /* +ve: OK but no match */
	}
}
static int
lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
{
	struct lnet_match_info info;
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_handle_wire_t reply_wmd;
	int rc;

	/* Convert get fields to host byte order */
	hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
	hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
	hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
	hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);

	info.mi_id.nid = hdr->src_nid;
	info.mi_id.pid = hdr->src_pid;
	info.mi_opc = LNET_MD_OP_GET;
	info.mi_portal = hdr->msg.get.ptl_index;
	info.mi_rlength = hdr->msg.get.sink_length;
	info.mi_roffset = hdr->msg.get.src_offset;
	info.mi_mbits = hdr->msg.get.match_bits;

	rc = lnet_ptl_match_md(&info, msg);
	if (rc == LNET_MATCHMD_DROP) {
		CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
			libcfs_id2str(info.mi_id), info.mi_portal,
			info.mi_mbits, info.mi_roffset, info.mi_rlength);
		return ENOENT; /* +ve: OK but no match */
	}

	LASSERT(rc == LNET_MATCHMD_OK);

	lnet_build_msg_event(msg, LNET_EVENT_GET);

	reply_wmd = hdr->msg.get.return_wmd;

	lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
		       msg->msg_offset, msg->msg_wanted);

	msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;

	if (rdma_get) {
		/* The LND completes the REPLY from her recv procedure */
		lnet_ni_recv(ni, msg->msg_private, msg, 0,
			     msg->msg_offset, msg->msg_len, msg->msg_len);
		return 0;
	}

	lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
	msg->msg_receiving = 0;

	rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
	if (rc < 0) {
		/* didn't get as far as lnet_ni_send() */
		CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
		       libcfs_nid2str(ni->ni_nid),
		       libcfs_id2str(info.mi_id), rc);

		lnet_finalize(ni, msg, rc);
	}

	return 0;
}
static int
lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
{
	void *private = msg->msg_private;
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_process_id_t src = {0};
	lnet_libmd_t *md;
	int rlength;
	int mlength;
	int cpt;

	cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
	lnet_res_lock(cpt);

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	/* NB handles only looked up by creator (no flips) */
	md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
		CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
			(md == NULL) ? "invalid" : "inactive",
			hdr->msg.reply.dst_wmd.wh_interface_cookie,
			hdr->msg.reply.dst_wmd.wh_object_cookie);
		if (md != NULL && md->md_me != NULL)
			CERROR("REPLY MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);
		return ENOENT; /* +ve: OK but no match */
	}

	LASSERT(md->md_offset == 0);

	rlength = hdr->payload_length;
	mlength = min_t(uint, rlength, md->md_length);

	if (mlength < rlength &&
	    (md->md_options & LNET_MD_TRUNCATE) == 0) {
		CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
			rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
			mlength);
		lnet_res_unlock(cpt);
		return ENOENT; /* +ve: OK but no match */
	}

	CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
	       mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);

	lnet_msg_attach_md(msg, md, 0, mlength);

	if (mlength != 0)
		lnet_setpayloadbuffer(msg);

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_REPLY);

	lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
	return 0;
}
static int
lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_process_id_t src = {0};
	lnet_libmd_t *md;
	int cpt;

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	/* Convert ack fields to host byte order */
	hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
	hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);

	cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
	lnet_res_lock(cpt);

	/* NB handles only looked up by creator (no flips) */
	md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
		/* Don't moan; this is expected */
		CDEBUG(D_NET,
		       "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
		       (md == NULL) ? "invalid" : "inactive",
		       hdr->msg.ack.dst_wmd.wh_interface_cookie,
		       hdr->msg.ack.dst_wmd.wh_object_cookie);
		if (md != NULL && md->md_me != NULL)
			CERROR("Source MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);
		return ENOENT; /* +ve! */
	}

	CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
	       hdr->msg.ack.dst_wmd.wh_object_cookie);

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_ACK);

	lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
	return 0;
}
static int
lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
{
	int rc = 0;

	if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
	    lnet_msg2bufpool(msg)->rbp_credits <= 0) {
		if (ni->ni_lnd->lnd_eager_recv == NULL) {
			msg->msg_rx_ready_delay = 1;
		} else {
			lnet_net_unlock(msg->msg_rx_cpt);
			rc = lnet_ni_eager_recv(ni, msg);
			lnet_net_lock(msg->msg_rx_cpt);
		}
	}

	if (rc == 0)
		rc = lnet_post_routed_recv_locked(msg, 0);
	return rc;
}
char *
lnet_msgtyp2str(int type)
{
	switch (type) {
	case LNET_MSG_ACK:
		return "ACK";
	case LNET_MSG_PUT:
		return "PUT";
	case LNET_MSG_GET:
		return "GET";
	case LNET_MSG_REPLY:
		return "REPLY";
	case LNET_MSG_HELLO:
		return "HELLO";
	default:
		return "<UNKNOWN>";
	}
}
EXPORT_SYMBOL(lnet_msgtyp2str);
void
lnet_print_hdr(lnet_hdr_t *hdr)
{
	lnet_process_id_t src = {0};
	lnet_process_id_t dst = {0};
	char *type_str = lnet_msgtyp2str(hdr->type);

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	dst.nid = hdr->dest_nid;
	dst.pid = hdr->dest_pid;

	CWARN("P3 Header at %p of type %s\n", hdr, type_str);
	CWARN("    From %s\n", libcfs_id2str(src));
	CWARN("    To   %s\n", libcfs_id2str(dst));

	switch (hdr->type) {
	default:
		break;

	case LNET_MSG_PUT:
		CWARN("    Ptl index %d, ack md %#llx.%#llx, match bits %llu\n",
		      hdr->msg.put.ptl_index,
		      hdr->msg.put.ack_wmd.wh_interface_cookie,
		      hdr->msg.put.ack_wmd.wh_object_cookie,
		      hdr->msg.put.match_bits);
		CWARN("    Length %d, offset %d, hdr data %#llx\n",
		      hdr->payload_length, hdr->msg.put.offset,
		      hdr->msg.put.hdr_data);
		break;

	case LNET_MSG_GET:
		CWARN("    Ptl index %d, return md %#llx.%#llx, match bits %llu\n",
		      hdr->msg.get.ptl_index,
		      hdr->msg.get.return_wmd.wh_interface_cookie,
		      hdr->msg.get.return_wmd.wh_object_cookie,
		      hdr->msg.get.match_bits);
		CWARN("    Length %d, src offset %d\n",
		      hdr->msg.get.sink_length,
		      hdr->msg.get.src_offset);
		break;

	case LNET_MSG_ACK:
		CWARN("    dst md %#llx.%#llx, manipulated length %d\n",
		      hdr->msg.ack.dst_wmd.wh_interface_cookie,
		      hdr->msg.ack.dst_wmd.wh_object_cookie,
		      hdr->msg.ack.mlength);
		break;

	case LNET_MSG_REPLY:
		CWARN("    dst md %#llx.%#llx, length %d\n",
		      hdr->msg.reply.dst_wmd.wh_interface_cookie,
		      hdr->msg.reply.dst_wmd.wh_object_cookie,
		      hdr->payload_length);
	}
}
int
lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
	   void *private, int rdma_req)
{
	int rc = 0;
	int cpt;
	int for_me;
	struct lnet_msg *msg;
	lnet_pid_t dest_pid;
	lnet_nid_t dest_nid;
	lnet_nid_t src_nid;
	__u32 payload_length;
	__u32 type;

	LASSERT(!in_interrupt());

	type = le32_to_cpu(hdr->type);
	src_nid = le64_to_cpu(hdr->src_nid);
	dest_nid = le64_to_cpu(hdr->dest_nid);
	dest_pid = le32_to_cpu(hdr->dest_pid);
	payload_length = le32_to_cpu(hdr->payload_length);

	for_me = (ni->ni_nid == dest_nid);
	cpt = lnet_cpt_of_nid(from_nid);

	switch (type) {
	case LNET_MSG_ACK:
	case LNET_MSG_GET:
		if (payload_length > 0) {
			CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       lnet_msgtyp2str(type), payload_length);
			return -EPROTO;
		}
		break;

	case LNET_MSG_PUT:
	case LNET_MSG_REPLY:
		if (payload_length >
		    (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
			CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       lnet_msgtyp2str(type),
			       payload_length,
			       for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
			return -EPROTO;
		}
		break;

	default:
		CERROR("%s, src %s: Bad message type 0x%x\n",
		       libcfs_nid2str(from_nid),
		       libcfs_nid2str(src_nid), type);
		return -EPROTO;
	}

	if (the_lnet.ln_routing &&
	    ni->ni_last_alive != ktime_get_real_seconds()) {
		lnet_ni_lock(ni);

		/* NB: so far here is the only place to set NI status to "up" */
		ni->ni_last_alive = ktime_get_real_seconds();
		if (ni->ni_status != NULL &&
		    ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
			ni->ni_status->ns_status = LNET_NI_STATUS_UP;
		lnet_ni_unlock(ni);
	}

	/* Regard a bad destination NID as a protocol error. Senders should
	 * know what they're doing; if they don't they're misconfigured, buggy
	 * or malicious so we chop them off at the knees :) */

	if (!for_me) {
		if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
			/* should have gone direct */
			CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (lnet_islocalnid(dest_nid)) {
			/* dest is another local NI; sender should have used
			 * this node's NID on its own network */
			CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (rdma_req && type == LNET_MSG_GET) {
			CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (!the_lnet.ln_routing) {
			CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			goto drop;
		}
	}

	/* Message looks OK; we're not going to return an error, so we MUST
	 * call back lnd_recv() come what may... */

	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
	    fail_peer(src_nid, 0)) {		/* shall we now? */
		CERROR("%s, src %s: Dropping %s to simulate failure\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type));
		goto drop;
	}

	msg = lnet_msg_alloc();
	if (msg == NULL) {
		CERROR("%s, src %s: Dropping %s (out of memory)\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type));
		goto drop;
	}

	/* msg zeroed in lnet_msg_alloc;
	 * i.e. flags all clear, pointers NULL etc
	 */

	msg->msg_type = type;
	msg->msg_private = private;
	msg->msg_receiving = 1;
	msg->msg_len = msg->msg_wanted = payload_length;
	msg->msg_offset = 0;
	msg->msg_hdr = *hdr;
	/* for building message event */
	msg->msg_from = from_nid;
	if (!for_me) {
		msg->msg_target.pid = dest_pid;
		msg->msg_target.nid = dest_nid;
		msg->msg_routing = 1;
	} else {
		/* convert common msg->hdr fields to host byteorder */
		msg->msg_hdr.type = type;
		msg->msg_hdr.src_nid = src_nid;
		msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
		msg->msg_hdr.dest_nid = dest_nid;
		msg->msg_hdr.dest_pid = dest_pid;
		msg->msg_hdr.payload_length = payload_length;
	}

	lnet_net_lock(cpt);
	rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
	if (rc != 0) {
		lnet_net_unlock(cpt);
		CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type), rc);
		lnet_msg_free(msg);
		goto drop;
	}

	if (lnet_isrouter(msg->msg_rxpeer)) {
		lnet_peer_set_alive(msg->msg_rxpeer);
		if (avoid_asym_router_failure &&
		    LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
			/* received a remote message from router, update
			 * remote NI status on this router.
			 * NB: multi-hop routed message will be ignored.
			 */
			lnet_router_ni_update_locked(msg->msg_rxpeer,
						     LNET_NIDNET(src_nid));
		}
	}

	lnet_msg_commit(msg, cpt);

	if (!for_me) {
		rc = lnet_parse_forward_locked(ni, msg);
		lnet_net_unlock(cpt);

		if (rc < 0)
			goto free_drop;
		if (rc == 0) {
			lnet_ni_recv(ni, msg->msg_private, msg, 0,
				     0, payload_length, payload_length);
		}
		return 0;
	}

	lnet_net_unlock(cpt);

	switch (type) {
	case LNET_MSG_ACK:
		rc = lnet_parse_ack(ni, msg);
		break;
	case LNET_MSG_PUT:
		rc = lnet_parse_put(ni, msg);
		break;
	case LNET_MSG_GET:
		rc = lnet_parse_get(ni, msg, rdma_req);
		break;
	case LNET_MSG_REPLY:
		rc = lnet_parse_reply(ni, msg);
		break;
	default:
		LASSERT(0);
		goto free_drop;	/* prevent an unused label if !kernel */
	}

	if (rc == 0)
		return 0;

	LASSERT(rc == ENOENT);

 free_drop:
	LASSERT(msg->msg_md == NULL);
	lnet_finalize(ni, msg, rc);

 drop:
	lnet_drop_message(ni, cpt, private, payload_length);
	return 0;
}
EXPORT_SYMBOL(lnet_parse);
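/*
 * Usage sketch (illustrative; LND function and field names below are
 * hypothetical): an LND's receive path decodes the wire header into an
 * lnet_hdr_t and hands it to lnet_parse().  A return of 0 means LNet
 * has taken over and will call lnd_recv() for the payload; a negative
 * return means the message was rejected and the connection may be torn
 * down:
 *
 *	static void
 *	mylnd_rx_handler(lnet_ni_t *ni, struct mylnd_rx *rx)
 *	{
 *		int rc;
 *
 *		rc = lnet_parse(ni, &rx->rx_hdr, rx->rx_peer_nid,
 *				rx, 0);	// 'rx' becomes msg_private; no RDMA
 *		if (rc < 0)
 *			mylnd_close_conn(rx->rx_conn);
 *	}
 */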
void
lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
{
	while (!list_empty(head)) {
		lnet_process_id_t id = {0};
		lnet_msg_t *msg;

		msg = list_entry(head->next, lnet_msg_t, msg_list);
		list_del(&msg->msg_list);

		id.nid = msg->msg_hdr.src_nid;
		id.pid = msg->msg_hdr.src_pid;

		LASSERT(msg->msg_md == NULL);
		LASSERT(msg->msg_rx_delayed);
		LASSERT(msg->msg_rxpeer != NULL);
		LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

		CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
		      libcfs_id2str(id),
		      msg->msg_hdr.msg.put.ptl_index,
		      msg->msg_hdr.msg.put.match_bits,
		      msg->msg_hdr.msg.put.offset,
		      msg->msg_hdr.payload_length, reason);

		/* NB I can't drop msg's ref on msg_rxpeer until after I've
		 * called lnet_drop_message(), so I just hang onto msg as well
		 * until that's done */

		lnet_drop_message(msg->msg_rxpeer->lp_ni,
				  msg->msg_rxpeer->lp_cpt,
				  msg->msg_private, msg->msg_len);
		/*
		 * NB: the message will not generate an event because it has
		 * no attached MD, but we should still give an error code so
		 * lnet_msg_decommit() can skip counter operations and other
		 * checks.
		 */
		lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
	}
}
void
lnet_recv_delayed_msg_list(struct list_head *head)
{
	while (!list_empty(head)) {
		lnet_msg_t *msg;
		lnet_process_id_t id;

		msg = list_entry(head->next, lnet_msg_t, msg_list);
		list_del(&msg->msg_list);

		/* md won't disappear under me, since each msg
		 * holds a ref on it */

		id.nid = msg->msg_hdr.src_nid;
		id.pid = msg->msg_hdr.src_pid;

		LASSERT(msg->msg_rx_delayed);
		LASSERT(msg->msg_md != NULL);
		LASSERT(msg->msg_rxpeer != NULL);
		LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

		CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
		       libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
		       msg->msg_hdr.msg.put.match_bits,
		       msg->msg_hdr.msg.put.offset,
		       msg->msg_hdr.payload_length);

		lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
	}
}
/**
 * Initiate an asynchronous PUT operation.
 *
 * There are several events associated with a PUT: completion of the send on
 * the initiator node (LNET_EVENT_SEND), and when the send completes
 * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
 * that the operation was accepted by the target. The event LNET_EVENT_PUT is
 * used at the target node to indicate the completion of incoming data
 * delivery.
 *
 * The local events will be logged in the EQ associated with the MD pointed to
 * by \a mdh handle. Using an MD without an associated EQ results in these
 * events being discarded. In this case, the caller must have another
 * mechanism (e.g., a higher level protocol) for determining when it is safe
 * to modify the memory region associated with the MD.
 *
 * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
 * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
 *
 * \param self Indicates the NID of a local interface through which to send
 * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
 * \param mdh A handle for the MD that describes the memory to be sent. The MD
 * must be "free floating" (See LNetMDBind()).
 * \param ack Controls whether an acknowledgment is requested.
 * Acknowledgments are only sent when they are requested by the initiating
 * process and the target MD enables them.
 * \param target A process identifier for the target process.
 * \param portal The index in the \a target's portal table.
 * \param match_bits The match bits to use for MD selection at the target
 * process.
 * \param offset The offset into the target MD (only used when the target
 * MD has the LNET_MD_MANAGE_REMOTE option set).
 * \param hdr_data 64 bits of user data that can be included in the message
 * header. This data is written to an event queue entry at the target if an
 * EQ is present on the matching MD.
 *
 * \retval 0 Success, and only in this case events will be generated
 * and logged to EQ (if it exists).
 * \retval -EIO Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 *
 * \see lnet_event_t::hdr_data and lnet_event_kind_t.
 */
int
LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
	lnet_process_id_t target, unsigned int portal,
	__u64 match_bits, unsigned int offset,
	__u64 hdr_data)
{
	struct lnet_msg *msg;
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
	    fail_peer(target.nid, 1)) { /* shall we now? */
		CERROR("Dropping PUT to %s: simulated failure\n",
		       libcfs_id2str(target));
		return -EIO;
	}

	msg = lnet_msg_alloc();
	if (msg == NULL) {
		CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
		       libcfs_id2str(target));
		return -ENOMEM;
	}
	msg->msg_vmflush = !!memory_pressure_get();

	cpt = lnet_cpt_of_cookie(mdh.cookie);
	lnet_res_lock(cpt);

	md = lnet_handle2md(&mdh);
	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
		CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
		       match_bits, portal, libcfs_id2str(target),
		       md == NULL ? -1 : md->md_threshold);
		if (md != NULL && md->md_me != NULL)
			CERROR("Source MD also attached to portal %d\n",
			       md->md_me->me_portal);
		lnet_res_unlock(cpt);

		lnet_msg_free(msg);
		return -ENOENT;
	}

	CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);

	msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
	msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
	msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
	msg->msg_hdr.msg.put.hdr_data = hdr_data;

	/* NB handles only looked up by creator (no flips) */
	if (ack == LNET_ACK_REQ) {
		msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
			the_lnet.ln_interface_cookie;
		msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
			md->md_lh.lh_cookie;
	} else {
		msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
			LNET_WIRE_HANDLE_COOKIE_NONE;
		msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
			LNET_WIRE_HANDLE_COOKIE_NONE;
	}

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_SEND);

	rc = lnet_send(self, msg, LNET_NID_ANY);
	if (rc != 0) {
		CNETERR("Error sending PUT to %s: %d\n",
			libcfs_id2str(target), rc);
		lnet_finalize(NULL, msg, rc);
	}

	/* completion will be signalled by an event */
	return 0;
}
EXPORT_SYMBOL(LNetPut);
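/*
 * Usage sketch (illustrative; 'mdh', MY_PORTAL and MY_MATCH_BITS are
 * made-up names).  Assuming the caller has bound a buffer with
 * LNetMDBind() into 'mdh' and attached an event queue to that MD:
 *
 *	lnet_process_id_t target = {
 *		.nid = libcfs_str2nid("10.0.0.2@tcp"),
 *		.pid = LUSTRE_SRV_LNET_PID,
 *	};
 *	int rc;
 *
 *	rc = LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
 *		     MY_PORTAL, MY_MATCH_BITS, 0, 0);
 *	if (rc != 0)
 *		CERROR("LNetPut to %s failed: %d\n",
 *		       libcfs_id2str(target), rc);
 *	// on success, watch the MD's EQ for LNET_EVENT_SEND and
 *	// LNET_EVENT_ACK (in either order; see the comment above)
 */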
lnet_msg_t *
lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
{
	/* The LND can DMA direct to the GET md (i.e. no REPLY msg). This
	 * returns a msg for the LND to pass to lnet_finalize() when the sink
	 * data has been received.
	 *
	 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
	 * lnet_finalize() is called on it, so the LND must call this first */

	struct lnet_msg *msg = lnet_msg_alloc();
	struct lnet_libmd *getmd = getmsg->msg_md;
	lnet_process_id_t peer_id = getmsg->msg_target;
	int cpt;

	LASSERT(!getmsg->msg_target_is_router);
	LASSERT(!getmsg->msg_routing);

	cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
	lnet_res_lock(cpt);

	LASSERT(getmd->md_refcount > 0);

	if (msg == NULL) {
		CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
		lnet_res_unlock(cpt);
		goto drop;
	}

	if (getmd->md_threshold == 0) {
		CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
		       getmd);
		lnet_res_unlock(cpt);
		goto drop;
	}

	LASSERT(getmd->md_offset == 0);

	CDEBUG(D_NET, "%s: Reply from %s md %p\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);

	/* setup information for lnet_build_msg_event */
	msg->msg_from = peer_id.nid;
	msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
	msg->msg_hdr.src_nid = peer_id.nid;
	msg->msg_hdr.payload_length = getmd->md_length;
	msg->msg_receiving = 1; /* required by lnet_msg_attach_md */

	lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
	lnet_res_unlock(cpt);

	cpt = lnet_cpt_of_nid(peer_id.nid);

	lnet_net_lock(cpt);
	lnet_msg_commit(msg, cpt);
	lnet_net_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_REPLY);

	return msg;

 drop:
	cpt = lnet_cpt_of_nid(peer_id.nid);

	lnet_net_lock(cpt);
	the_lnet.ln_counters[cpt]->drop_count++;
	the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
	lnet_net_unlock(cpt);

	if (msg != NULL)
		lnet_msg_free(msg);

	return NULL;
}
EXPORT_SYMBOL(lnet_create_reply_msg);
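/*
 * Usage sketch (illustrative; variable names are hypothetical): an LND
 * that satisfies a GET by RDMA-ing the sink data directly (no REPLY on
 * the wire) creates the REPLY stand-in before finalizing the GET, then
 * finalizes the stand-in once the RDMA completes:
 *
 *	lnet_msg_t *reply = lnet_create_reply_msg(ni, getmsg);
 *	if (reply == NULL)
 *		return -ENOMEM;	// the peer's REPLY is accounted as dropped
 *	...start the RDMA; on completion:
 *	lnet_set_reply_msg_len(ni, reply, nob_actually_written);
 *	lnet_finalize(ni, reply, status);
 */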
void
lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
{
	/* Set the REPLY length, now the RDMA that elides the REPLY message has
	 * completed and I know it. */
	LASSERT(reply != NULL);
	LASSERT(reply->msg_type == LNET_MSG_GET);
	LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);

	/* NB I trusted my peer to RDMA. If she tells me she's written beyond
	 * the end of my buffer, I might as well be dead. */
	LASSERT(len <= reply->msg_ev.mlength);

	reply->msg_ev.mlength = len;
}
EXPORT_SYMBOL(lnet_set_reply_msg_len);
/**
 * Initiate an asynchronous GET operation.
 *
 * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
 * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
 * the target node in the REPLY has been written to the local MD.
 *
 * On the target node, an LNET_EVENT_GET is logged when the GET request
 * arrives and is accepted into an MD.
 *
 * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
 * \param mdh A handle for the MD that describes the memory into which the
 * requested data will be received. The MD must be "free floating"
 * (See LNetMDBind()).
 *
 * \retval 0 Success, and only in this case events will be generated
 * and logged to EQ (if it exists) of the MD.
 * \retval -EIO Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 */
int
LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
	lnet_process_id_t target, unsigned int portal,
	__u64 match_bits, unsigned int offset)
{
	struct lnet_msg *msg;
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
	    fail_peer(target.nid, 1)) { /* shall we now? */
		CERROR("Dropping GET to %s: simulated failure\n",
		       libcfs_id2str(target));
		return -EIO;
	}

	msg = lnet_msg_alloc();
	if (msg == NULL) {
		CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
		       libcfs_id2str(target));
		return -ENOMEM;
	}

	cpt = lnet_cpt_of_cookie(mdh.cookie);
	lnet_res_lock(cpt);

	md = lnet_handle2md(&mdh);
	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
		CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
		       match_bits, portal, libcfs_id2str(target),
		       md == NULL ? -1 : md->md_threshold);
		if (md != NULL && md->md_me != NULL)
			CERROR("REPLY MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);

		lnet_msg_free(msg);
		return -ENOENT;
	}

	CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);

	msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
	msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
	msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
	msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);

	/* NB handles only looked up by creator (no flips) */
	msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
		the_lnet.ln_interface_cookie;
	msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
		md->md_lh.lh_cookie;

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_SEND);

	rc = lnet_send(self, msg, LNET_NID_ANY);
	if (rc < 0) {
		CNETERR("Error sending GET to %s: %d\n",
			libcfs_id2str(target), rc);
		lnet_finalize(NULL, msg, rc);
	}

	/* completion will be signalled by an event */
	return 0;
}
EXPORT_SYMBOL(LNetGet);
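/*
 * Usage sketch (illustrative; 'mdh', 'target', MY_PORTAL and
 * MY_MATCH_BITS are made-up names).  As with LNetPut(), 'mdh' must
 * describe a free-floating MD bound with LNetMDBind(); here the MD is
 * the sink the REPLY data lands in:
 *
 *	rc = LNetGet(LNET_NID_ANY, mdh, target,
 *		     MY_PORTAL, MY_MATCH_BITS, 0);
 *	if (rc != 0)
 *		CERROR("LNetGet from %s failed: %d\n",
 *		       libcfs_id2str(target), rc);
 *	// on success, LNET_EVENT_SEND and then LNET_EVENT_REPLY arrive
 *	// on the MD's EQ; the REPLY event carries the returned length
 */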
/**
 * Calculate the distance to the node at \a dstnid.
 *
 * \param dstnid Target NID.
 * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
 * is saved here.
 * \param orderp If not NULL, order of the route to reach \a dstnid is saved
 * here.
 *
 * \retval 0 If \a dstnid belongs to a local interface, and reserved option
 * local_nid_dist_zero is set, which is the default.
 * \retval positives Distance to target NID, i.e. the number of hops plus one.
 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
 */
int
LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
	struct list_head *e;
	struct lnet_ni *ni;
	lnet_remotenet_t *rnet;
	__u32 dstnet = LNET_NIDNET(dstnid);
	int hops;
	int cpt;
	__u32 order = 2;
	struct list_head *rn_list;

	/* if !local_nid_dist_zero, I don't return a distance of 0 ever
	 * (when lustre sees a distance of 0, it substitutes 0@lo), so I
	 * keep order 0 free for 0@lo and order 1 free for a local NID
	 * match */

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_net_lock_current();

	list_for_each(e, &the_lnet.ln_nis) {
		ni = list_entry(e, lnet_ni_t, ni_list);

		if (ni->ni_nid == dstnid) {
			if (srcnidp != NULL)
				*srcnidp = dstnid;
			if (orderp != NULL) {
				if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
					*orderp = 0;
				else
					*orderp = 1;
			}
			lnet_net_unlock(cpt);

			return local_nid_dist_zero ? 0 : 1;
		}

		if (LNET_NIDNET(ni->ni_nid) == dstnet) {
			if (srcnidp != NULL)
				*srcnidp = ni->ni_nid;
			if (orderp != NULL)
				*orderp = order;
			lnet_net_unlock(cpt);
			return 1;
		}

		order++;
	}

	rn_list = lnet_net2rnethash(dstnet);
	list_for_each(e, rn_list) {
		rnet = list_entry(e, lnet_remotenet_t, lrn_list);

		if (rnet->lrn_net == dstnet) {
			lnet_route_t *route;
			lnet_route_t *shortest = NULL;

			LASSERT(!list_empty(&rnet->lrn_routes));

			list_for_each_entry(route, &rnet->lrn_routes,
					    lr_list) {
				if (shortest == NULL ||
				    route->lr_hops < shortest->lr_hops)
					shortest = route;
			}

			LASSERT(shortest != NULL);
			hops = shortest->lr_hops;
			if (srcnidp != NULL)
				*srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
			if (orderp != NULL)
				*orderp = order;
			lnet_net_unlock(cpt);
			return hops + 1;
		}
		order++;
	}

	lnet_net_unlock(cpt);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(LNetDist);
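/*
 * Usage sketch (illustrative; 'peer_nid' is a made-up variable):
 * callers typically use LNetDist() to rank candidate NIDs by proximity:
 *
 *	lnet_nid_t src;
 *	__u32 order;
 *	int dist;
 *
 *	dist = LNetDist(peer_nid, &src, &order);
 *	if (dist < 0)
 *		CERROR("%s unreachable: %d\n",
 *		       libcfs_nid2str(peer_nid), dist);
 *	else
 *		CDEBUG(D_NET, "%s: distance %d via %s (order %u)\n",
 *		       libcfs_nid2str(peer_nid), dist,
 *		       libcfs_nid2str(src), order);
 */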