2 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
4 * Copyright (c) 2012, Intel Corporation.
6 * Author: Zach Brown <zab@zabbo.net>
7 * Author: Peter J. Braam <braam@clusterfs.com>
8 * Author: Phil Schwan <phil@clusterfs.com>
9 * Author: Eric Barton <eric@bartonsoftware.com>
11 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
13 * Portals is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
17 * Portals is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
28 * pro_send_hello : send hello message
29 * pro_recv_hello : receive hello message
30 * pro_pack : pack message header
31 * pro_unpack : unpack message header
32 * pro_queue_tx_zcack() : Called holding BH lock: kss_lock
33 * return 1 if ACK is piggybacked, otherwise return 0
34 * pro_queue_tx_msg() : Called holding BH lock: kss_lock
35 * return the ACK that is piggybacked by my message, or NULL
36 * pro_handle_zcreq() : handler of incoming ZC-REQ
37 * pro_handle_zcack() : handler of incoming ZC-ACK
38 * pro_match_tx() : Called holding glock
/*
 * pro_queue_tx_msg handler for protocol V1.x.
 * V1.x has no KSOCK_MSG_NOOP / ZC-ACK piggybacking, so the tx is simply
 * appended to the connection's send queue.  Called holding the scheduler's
 * kss_lock (BH).
 * NOTE(review): the function's return statement is not visible here;
 * presumably it returns NULL (nothing can be piggybacked in V1.x) — confirm.
 */
41 static struct ksock_tx *
42 ksocknal_queue_tx_msg_v1(struct ksock_conn *conn, struct ksock_tx *tx_msg)
44 /* V1.x, just enqueue it */
45 list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
/*
 * Advance conn->ksnc_tx_carrier past the current carrier tx: point it at
 * the next entry on ksnc_tx_queue that could carry a ZC-ACK cookie or LNet
 * message, or set it to NULL when the current carrier is the last queued tx.
 */
50 ksocknal_next_tx_carrier(struct ksock_conn *conn)
52 struct ksock_tx *tx = conn->ksnc_tx_carrier;
54 /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
55 LASSERT(!list_empty(&conn->ksnc_tx_queue));
58 /* Next TX that can carry ZC-ACK or LNet message */
59 if (tx->tx_list.next == &conn->ksnc_tx_queue) {
60 /* no more packets queued */
61 conn->ksnc_tx_carrier = NULL;
63 conn->ksnc_tx_carrier = list_next_entry(tx, tx_list);
    /* the new carrier must be the same message type as the old one */
64 LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
/*
 * pro_queue_tx_zcack handler for protocol V2.x: try to piggyback a single
 * ZC-ACK cookie on the current tx carrier; otherwise enqueue tx_ack (when
 * non-NULL) as a standalone NOOP and make it the new carrier.
 * Called holding the scheduler's kss_lock (BH); returns 1 if the ACK was
 * piggybacked, 0 otherwise (per the pro_queue_tx_zcack contract above).
 */
69 ksocknal_queue_tx_zcack_v2(struct ksock_conn *conn,
70 struct ksock_tx *tx_ack, __u64 cookie)
72 struct ksock_tx *tx = conn->ksnc_tx_carrier;
75 tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
78 * Enqueue or piggyback tx_ack / cookie
79 * . if no tx can piggyback the cookie of tx_ack (or cookie), just
80 * enqueue the tx_ack (if tx_ack != NULL) and return NULL.
81 * . if there is a tx that can piggyback the cookie of tx_ack (or
82 * cookie), piggyback the cookie and return the tx.
86 list_add_tail(&tx_ack->tx_list,
87 &conn->ksnc_tx_queue);
88 conn->ksnc_tx_carrier = tx_ack;
93 if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
94 /* tx is noop zc-ack, can't piggyback zc-ack cookie */
96 list_add_tail(&tx_ack->tx_list,
97 &conn->ksnc_tx_queue);
    /* carrier is an LNet message with a free second cookie slot */
101 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
102 LASSERT(!tx->tx_msg.ksm_zc_cookies[1]);
105 cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
107 /* piggyback the zc-ack cookie */
108 tx->tx_msg.ksm_zc_cookies[1] = cookie;
109 /* move on to the next TX which can carry cookie */
110 ksocknal_next_tx_carrier(conn);
/*
 * pro_queue_tx_msg handler for protocol V2.x: enqueue tx_msg and, when the
 * current carrier is a NOOP zc-ack, absorb its cookie into tx_msg and take
 * the NOOP's place in the queue.  Called holding the scheduler's kss_lock
 * (BH); returns the replaced NOOP tx, or NULL when nothing was absorbed.
 */
115 static struct ksock_tx *
116 ksocknal_queue_tx_msg_v2(struct ksock_conn *conn, struct ksock_tx *tx_msg)
118 struct ksock_tx *tx = conn->ksnc_tx_carrier;
122 * . If there is no NOOP on the connection, just enqueue
123 * tx_msg and return NULL
124 * . If there is NOOP on the connection, piggyback the cookie
125 * and replace the NOOP tx, and return the NOOP tx.
127 if (!tx) { /* nothing on queue */
128 list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
129 conn->ksnc_tx_carrier = tx_msg;
133 if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */
134 list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
138 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
140 /* There is a noop zc-ack can be piggybacked */
141 tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1];
142 ksocknal_next_tx_carrier(conn);
144 /* use new_tx to replace the noop zc-ack packet */
145 list_add(&tx_msg->tx_list, &tx->tx_list);
146 list_del(&tx->tx_list);
/*
 * pro_queue_tx_zcack handler for protocol V3.x.  On non-ACK connections it
 * falls back to the V2.x logic.  On the dedicated ACK connection it packs up
 * to two cookies, or a contiguous cookie range, into the carrier NOOP:
 * ksm_zc_cookies[0] > [1] means two separate cookies, [0] < [1] means an
 * inclusive range.  Called holding the scheduler's kss_lock (BH); returns 1
 * when the cookie was absorbed/piggybacked, 0 when a new tx must be queued.
 */
152 ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
153 struct ksock_tx *tx_ack, __u64 cookie)
157 if (conn->ksnc_type != SOCKLND_CONN_ACK)
158 return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
160 /* non-blocking ZC-ACK (to router) */
162 tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
164 tx = conn->ksnc_tx_carrier;
167 list_add_tail(&tx_ack->tx_list,
168 &conn->ksnc_tx_queue);
169 conn->ksnc_tx_carrier = tx_ack;
174 /* conn->ksnc_tx_carrier */
177 cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
179 if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */
182 if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
183 /* replace the keepalive PING with a real ACK */
184 LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
185 tx->tx_msg.ksm_zc_cookies[1] = cookie;
189 if (cookie == tx->tx_msg.ksm_zc_cookies[0] ||
190 cookie == tx->tx_msg.ksm_zc_cookies[1]) {
191 CWARN("%s: duplicated ZC cookie: %llu\n",
192 libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
193 return 1; /* XXX return error in the future */
196 if (!tx->tx_msg.ksm_zc_cookies[0]) {
198 * NOOP tx has only one ZC-ACK cookie,
199 * can carry at least one more
201 if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
    /* keep the pair ordered: [0] holds the larger cookie */
202 tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
203 tx->tx_msg.ksm_zc_cookies[1] = cookie;
205 tx->tx_msg.ksm_zc_cookies[0] = cookie;
208 if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
210 * not likely to carry more ACKs, skip it
213 ksocknal_next_tx_carrier(conn);
219 /* takes two or more cookies already */
221 if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) {
224 /* two separated cookies: (a+2, a) or (a+1, a) */
225 LASSERT(tx->tx_msg.ksm_zc_cookies[0] -
226 tx->tx_msg.ksm_zc_cookies[1] <= 2);
228 if (tx->tx_msg.ksm_zc_cookies[0] -
229 tx->tx_msg.ksm_zc_cookies[1] == 2) {
230 if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1)
232 } else if (cookie == tx->tx_msg.ksm_zc_cookies[1] - 1) {
233 tmp = tx->tx_msg.ksm_zc_cookies[1];
234 } else if (cookie == tx->tx_msg.ksm_zc_cookies[0] + 1) {
235 tmp = tx->tx_msg.ksm_zc_cookies[0];
239 /* range of cookies */
240 tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
241 tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
247 * ksm_zc_cookies[0] < ksm_zc_cookies[1],
248 * it is range of cookies
250 if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
251 cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
252 CWARN("%s: duplicated ZC cookie: %llu\n",
253 libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
254 return 1; /* XXX: return error in the future */
    /* extend the range upward or downward by one if adjacent */
257 if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) {
258 tx->tx_msg.ksm_zc_cookies[1] = cookie;
262 if (cookie == tx->tx_msg.ksm_zc_cookies[0] - 1) {
263 tx->tx_msg.ksm_zc_cookies[0] = cookie;
268 /* failed to piggyback ZC-ACK */
270 list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
271 /* the next tx can piggyback at least 1 ACK */
272 ksocknal_next_tx_carrier(conn);
/*
 * pro_match_tx handler (V1.x/V2.x): decide how well connection 'conn' suits
 * sending 'tx', based on the message size versus the ksnd_min_bulk tunable
 * and the connection type.  Returns SOCKNAL_MATCH_YES / _MAY (callers pick
 * the best match).  Called holding the global lock, per the file header.
 */
279 ksocknal_match_tx(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
283 #if SOCKNAL_VERSION_DEBUG
284 if (!*ksocknal_tunables.ksnd_typed_conns)
285 return SOCKNAL_MATCH_YES;
    /* nob = total wire size: header only for NOOPs, header + payload
     * for LNet messages (V1.x uses the raw lnet_hdr framing) */
288 if (!tx || !tx->tx_lnetmsg) {
290 nob = offsetof(struct ksock_msg, ksm_u);
292 nob = tx->tx_lnetmsg->msg_len +
293 ((conn->ksnc_proto == &ksocknal_protocol_v1x) ?
294 sizeof(struct lnet_hdr) : sizeof(struct ksock_msg));
297 /* default checking for typed connection */
298 switch (conn->ksnc_type) {
300 CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
302 case SOCKLND_CONN_ANY:
303 return SOCKNAL_MATCH_YES;
305 case SOCKLND_CONN_BULK_IN:
306 return SOCKNAL_MATCH_MAY;
308 case SOCKLND_CONN_BULK_OUT:
309 if (nob < *ksocknal_tunables.ksnd_min_bulk)
310 return SOCKNAL_MATCH_MAY;
312 return SOCKNAL_MATCH_YES;
314 case SOCKLND_CONN_CONTROL:
315 if (nob >= *ksocknal_tunables.ksnd_min_bulk)
316 return SOCKNAL_MATCH_MAY;
318 return SOCKNAL_MATCH_YES;
/*
 * pro_match_tx handler for protocol V3.x.  Like ksocknal_match_tx() but with
 * V3-specific rules: the ACK connection is used for non-blocking sends, and
 * some combinations are rejected outright with SOCKNAL_MATCH_NO.
 * NOTE(review): several 'if (...)' condition lines (presumably testing
 * 'nonblk') are not visible in this extract.
 */
323 ksocknal_match_tx_v3(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
327 if (!tx || !tx->tx_lnetmsg)
328 nob = offsetof(struct ksock_msg, ksm_u);
330 nob = tx->tx_lnetmsg->msg_len + sizeof(struct ksock_msg);
332 switch (conn->ksnc_type) {
334 CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
336 case SOCKLND_CONN_ANY:
337 return SOCKNAL_MATCH_NO;
339 case SOCKLND_CONN_ACK:
341 return SOCKNAL_MATCH_YES;
342 else if (!tx || !tx->tx_lnetmsg)
343 return SOCKNAL_MATCH_MAY;
345 return SOCKNAL_MATCH_NO;
347 case SOCKLND_CONN_BULK_OUT:
349 return SOCKNAL_MATCH_NO;
350 else if (nob < *ksocknal_tunables.ksnd_min_bulk)
351 return SOCKNAL_MATCH_MAY;
353 return SOCKNAL_MATCH_YES;
355 case SOCKLND_CONN_CONTROL:
357 return SOCKNAL_MATCH_NO;
358 else if (nob >= *ksocknal_tunables.ksnd_min_bulk)
359 return SOCKNAL_MATCH_MAY;
361 return SOCKNAL_MATCH_YES;
365 /* (Sink) handle incoming ZC request from sender:
 * try to piggyback the ZC-ACK cookie on an existing connection via
 * pro_queue_tx_zcack(); if that fails (no suitable connection, or nothing
 * to piggyback on), allocate a standalone NOOP tx carrying the cookie and
 * launch it. */
367 ksocknal_handle_zcreq(struct ksock_conn *c, __u64 cookie, int remote)
369 struct ksock_peer *peer = c->ksnc_peer;
370 struct ksock_conn *conn;
374 read_lock(&ksocknal_data.ksnd_global_lock);
376 conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
378 struct ksock_sched *sched = conn->ksnc_scheduler;
380 LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
382 spin_lock_bh(&sched->kss_lock);
384 rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
386 spin_unlock_bh(&sched->kss_lock);
388 if (rc) { /* piggybacked */
389 read_unlock(&ksocknal_data.ksnd_global_lock);
394 read_unlock(&ksocknal_data.ksnd_global_lock);
396 /* ACK connection is not ready, or can't piggyback the ACK */
397 tx = ksocknal_alloc_tx_noop(cookie, !!remote);
401 rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id);
    /* launch failed: drop our reference on the unused NOOP tx */
405 ksocknal_free_tx(tx);
409 /* (Sender) handle ZC_ACK from sink:
 * cookie1/cookie2 encode either two individual cookies (cookie1 > cookie2)
 * or an inclusive range [cookie1, cookie2].  Matching entries are removed
 * from the peer's zc_req_list and their tx refs dropped.  Returns 0 on
 * success, -EPROTO if the expected number of cookies was not found. */
411 ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
413 struct ksock_peer *peer = conn->ksnc_peer;
415 struct ksock_tx *temp;
416 struct ksock_tx *tmp;
423 count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);
425 if (cookie2 == SOCKNAL_KEEPALIVE_PING &&
426 conn->ksnc_proto == &ksocknal_protocol_v3x) {
427 /* keepalive PING for V3.x, just ignore it */
428 return count == 1 ? 0 : -EPROTO;
431 spin_lock(&peer->ksnp_lock);
433 list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list,
435 __u64 c = tx->tx_msg.ksm_zc_cookies[0];
437 if (c == cookie1 || c == cookie2 ||
438 (cookie1 < c && c < cookie2)) {
439 tx->tx_msg.ksm_zc_cookies[0] = 0;
    /* move matched tx onto a private list; decref outside the lock */
440 list_del(&tx->tx_zc_list);
441 list_add(&tx->tx_zc_list, &zlist);
448 spin_unlock(&peer->ksnp_lock);
450 list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) {
451 list_del(&tx->tx_zc_list);
452 ksocknal_tx_decref(tx);
455 return !count ? 0 : -EPROTO;
/*
 * pro_send_hello handler for protocol V1.x: repackage the hello message as
 * a little-endian lnet_hdr (the V1.x wire format) and write it, followed by
 * the optional list of interface IPs, to the connection's socket.
 * Returns 0 on success or a negative errno from the socket writes.
 */
459 ksocknal_send_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello)
461 struct socket *sock = conn->ksnc_sock;
462 struct lnet_hdr *hdr;
463 struct lnet_magicversion *hmv;
467 BUILD_BUG_ON(sizeof(struct lnet_magicversion) != offsetof(struct lnet_hdr, src_nid));
469 LIBCFS_ALLOC(hdr, sizeof(*hdr));
471 CERROR("Can't allocate struct lnet_hdr\n");
    /* magic/version live where dest_nid sits in the V1 header layout */
475 hmv = (struct lnet_magicversion *)&hdr->dest_nid;
478 * Re-organize V2.x message header to V1.x (struct lnet_hdr)
479 * header and send out
481 hmv->magic = cpu_to_le32(LNET_PROTO_TCP_MAGIC);
482 hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR);
483 hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR);
485 if (the_lnet.ln_testprotocompat) {
486 /* single-shot proto check: deliberately corrupt the header once
 * to exercise the peer's version/magic negotiation */
488 if (the_lnet.ln_testprotocompat & 1) {
489 hmv->version_major++; /* just different! */
490 the_lnet.ln_testprotocompat &= ~1;
492 if (the_lnet.ln_testprotocompat & 2) {
493 hmv->magic = LNET_PROTO_MAGIC;
494 the_lnet.ln_testprotocompat &= ~2;
499 hdr->src_nid = cpu_to_le64(hello->kshm_src_nid);
500 hdr->src_pid = cpu_to_le32(hello->kshm_src_pid);
501 hdr->type = cpu_to_le32(LNET_MSG_HELLO);
502 hdr->payload_length = cpu_to_le32(hello->kshm_nips * sizeof(__u32));
503 hdr->msg.hello.type = cpu_to_le32(hello->kshm_ctype);
504 hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation);
506 rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
508 CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
509 rc, &conn->ksnc_ipaddr, conn->ksnc_port);
513 if (!hello->kshm_nips)
    /* convert the IP list to little-endian in place before sending */
516 for (i = 0; i < (int)hello->kshm_nips; i++)
517 hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]);
519 rc = lnet_sock_write(sock, hello->kshm_ips,
520 hello->kshm_nips * sizeof(__u32),
521 lnet_acceptor_timeout());
523 CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
524 rc, hello->kshm_nips,
525 &conn->ksnc_ipaddr, conn->ksnc_port);
528 LIBCFS_FREE(hdr, sizeof(*hdr));
/*
 * pro_send_hello handler for protocols V2.x/V3.x: stamp the hello with the
 * LNet magic and the negotiated protocol version, then write the fixed part
 * of ksock_hello_msg followed by the optional IP list.
 * Returns 0 on success or a negative errno from the socket writes.
 */
534 ksocknal_send_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello)
536 struct socket *sock = conn->ksnc_sock;
539 hello->kshm_magic = LNET_PROTO_MAGIC;
540 hello->kshm_version = conn->ksnc_proto->pro_version;
542 if (the_lnet.ln_testprotocompat) {
543 /* single-shot proto check: send a bogus version once to exercise
 * the peer's version negotiation */
545 if (the_lnet.ln_testprotocompat & 1) {
546 hello->kshm_version++; /* just different! */
547 the_lnet.ln_testprotocompat &= ~1;
552 rc = lnet_sock_write(sock, hello, offsetof(struct ksock_hello_msg, kshm_ips),
553 lnet_acceptor_timeout());
555 CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
556 rc, &conn->ksnc_ipaddr, conn->ksnc_port);
560 if (!hello->kshm_nips)
563 rc = lnet_sock_write(sock, hello->kshm_ips,
564 hello->kshm_nips * sizeof(__u32),
565 lnet_acceptor_timeout());
567 CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
568 rc, hello->kshm_nips,
569 &conn->ksnc_ipaddr, conn->ksnc_port);
/*
 * pro_recv_hello handler for protocol V1.x: read the remainder of the
 * little-endian lnet_hdr (the magic/version at the front was consumed by
 * the caller), validate it is a HELLO, unpack it into *hello, then read and
 * validate the trailing IP list.  Returns 0 on success, negative errno on
 * failure.
 */
576 ksocknal_recv_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello,
579 struct socket *sock = conn->ksnc_sock;
580 struct lnet_hdr *hdr;
584 LIBCFS_ALLOC(hdr, sizeof(*hdr));
586 CERROR("Can't allocate struct lnet_hdr\n");
590 rc = lnet_sock_read(sock, &hdr->src_nid,
591 sizeof(*hdr) - offsetof(struct lnet_hdr, src_nid),
594 CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
595 rc, &conn->ksnc_ipaddr);
596 LASSERT(rc < 0 && rc != -EALREADY);
600 /* ...and check we got what we expected */
601 if (hdr->type != cpu_to_le32(LNET_MSG_HELLO)) {
602 CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n",
603 le32_to_cpu(hdr->type),
    /* unpack the V1 header (little-endian on the wire) into *hello */
609 hello->kshm_src_nid = le64_to_cpu(hdr->src_nid);
610 hello->kshm_src_pid = le32_to_cpu(hdr->src_pid);
611 hello->kshm_src_incarnation = le64_to_cpu(hdr->msg.hello.incarnation);
612 hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type);
613 hello->kshm_nips = le32_to_cpu(hdr->payload_length) /
616 if (hello->kshm_nips > LNET_MAX_INTERFACES) {
617 CERROR("Bad nips %d from ip %pI4h\n",
618 hello->kshm_nips, &conn->ksnc_ipaddr);
623 if (!hello->kshm_nips)
626 rc = lnet_sock_read(sock, hello->kshm_ips,
627 hello->kshm_nips * sizeof(__u32), timeout);
629 CERROR("Error %d reading IPs from ip %pI4h\n",
630 rc, &conn->ksnc_ipaddr);
631 LASSERT(rc < 0 && rc != -EALREADY);
635 for (i = 0; i < (int)hello->kshm_nips; i++) {
636 hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
638 if (!hello->kshm_ips[i]) {
639 CERROR("Zero IP[%d] from ip %pI4h\n",
640 i, &conn->ksnc_ipaddr);
646 LIBCFS_FREE(hdr, sizeof(*hdr));
/*
 * pro_recv_hello handler for protocols V2.x/V3.x: read the remainder of
 * struct ksock_hello_msg (magic/version already consumed by the caller),
 * byte-swap it if the peer's endianness differs (conn->ksnc_flip), then
 * read and validate the trailing IP list.  Returns 0 on success, negative
 * errno on failure.
 */
652 ksocknal_recv_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello,
655 struct socket *sock = conn->ksnc_sock;
659 if (hello->kshm_magic == LNET_PROTO_MAGIC)
664 rc = lnet_sock_read(sock, &hello->kshm_src_nid,
665 offsetof(struct ksock_hello_msg, kshm_ips) -
666 offsetof(struct ksock_hello_msg, kshm_src_nid),
669 CERROR("Error %d reading HELLO from %pI4h\n",
670 rc, &conn->ksnc_ipaddr);
671 LASSERT(rc < 0 && rc != -EALREADY);
675 if (conn->ksnc_flip) {
676 __swab32s(&hello->kshm_src_pid);
677 __swab64s(&hello->kshm_src_nid);
678 __swab32s(&hello->kshm_dst_pid);
679 __swab64s(&hello->kshm_dst_nid);
680 __swab64s(&hello->kshm_src_incarnation);
681 __swab64s(&hello->kshm_dst_incarnation);
682 __swab32s(&hello->kshm_ctype);
683 __swab32s(&hello->kshm_nips);
686 if (hello->kshm_nips > LNET_MAX_INTERFACES) {
687 CERROR("Bad nips %d from ip %pI4h\n",
688 hello->kshm_nips, &conn->ksnc_ipaddr);
692 if (!hello->kshm_nips)
695 rc = lnet_sock_read(sock, hello->kshm_ips,
696 hello->kshm_nips * sizeof(__u32), timeout);
698 CERROR("Error %d reading IPs from ip %pI4h\n",
699 rc, &conn->ksnc_ipaddr);
700 LASSERT(rc < 0 && rc != -EALREADY);
704 for (i = 0; i < (int)hello->kshm_nips; i++) {
706 __swab32s(&hello->kshm_ips[i]);
708 if (!hello->kshm_ips[i]) {
709 CERROR("Zero IP[%d] from ip %pI4h\n",
710 i, &conn->ksnc_ipaddr);
/*
 * pro_pack handler for protocol V1.x: point the first iov at the raw
 * lnet_hdr of the LNet message (V1.x sends the bare header, no ksock_msg
 * wrapper) and set the total/remaining byte counts.
 */
719 ksocknal_pack_msg_v1(struct ksock_tx *tx)
721 /* V1.x has no KSOCK_MSG_NOOP */
722 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
723 LASSERT(tx->tx_lnetmsg);
725 tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr;
726 tx->tx_iov[0].iov_len = sizeof(struct lnet_hdr);
728 tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(struct lnet_hdr);
729 tx->tx_resid = tx->tx_lnetmsg->msg_len + sizeof(struct lnet_hdr);
/*
 * pro_pack handler for protocols V2.x/V3.x: send the full ksock_msg.  For
 * LNet messages the embedded lnet_hdr is copied in and the whole struct is
 * sent; NOOP (ZC-ACK) messages send only the portion before the lnet_hdr.
 */
733 ksocknal_pack_msg_v2(struct ksock_tx *tx)
735 tx->tx_iov[0].iov_base = &tx->tx_msg;
737 if (tx->tx_lnetmsg) {
738 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
740 tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
741 tx->tx_iov[0].iov_len = sizeof(struct ksock_msg);
742 tx->tx_nob = sizeof(struct ksock_msg) + tx->tx_lnetmsg->msg_len;
743 tx->tx_resid = sizeof(struct ksock_msg) + tx->tx_lnetmsg->msg_len;
745 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
747 tx->tx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr);
748 tx->tx_nob = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr);
749 tx->tx_resid = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr);
752 * Don't checksum before start sending, because packet can be
753 * piggybacked with ACK
/*
 * pro_unpack handler for V1.x: a received V1 header is always an LNet
 * message, so synthesize the V2-style fields (type, zeroed ZC cookies)
 * that the rest of the receive path expects.
 */
758 ksocknal_unpack_msg_v1(struct ksock_msg *msg)
761 msg->ksm_type = KSOCK_MSG_LNET;
762 msg->ksm_zc_cookies[0] = 0;
763 msg->ksm_zc_cookies[1] = 0;
/* pro_unpack handler for V2.x/V3.x: the wire format already matches
 * struct ksock_msg, so nothing needs to be done. */
767 ksocknal_unpack_msg_v2(struct ksock_msg *msg)
769 return; /* Do nothing */
/* Protocol V1.x dispatch table: legacy raw lnet_hdr framing.  V1.x has no
 * NOOP messages and no zero-copy ACK support, so the ZC handlers are NULL. */
772 struct ksock_proto ksocknal_protocol_v1x = {
773 .pro_version = KSOCK_PROTO_V1,
774 .pro_send_hello = ksocknal_send_hello_v1,
775 .pro_recv_hello = ksocknal_recv_hello_v1,
776 .pro_pack = ksocknal_pack_msg_v1,
777 .pro_unpack = ksocknal_unpack_msg_v1,
778 .pro_queue_tx_msg = ksocknal_queue_tx_msg_v1,
779 .pro_handle_zcreq = NULL,
780 .pro_handle_zcack = NULL,
781 .pro_queue_tx_zcack = NULL,
782 .pro_match_tx = ksocknal_match_tx
/* Protocol V2.x dispatch table: ksock_msg framing with single-cookie
 * ZC-ACK piggybacking (queue_tx_zcack_v2). */
785 struct ksock_proto ksocknal_protocol_v2x = {
786 .pro_version = KSOCK_PROTO_V2,
787 .pro_send_hello = ksocknal_send_hello_v2,
788 .pro_recv_hello = ksocknal_recv_hello_v2,
789 .pro_pack = ksocknal_pack_msg_v2,
790 .pro_unpack = ksocknal_unpack_msg_v2,
791 .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
792 .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2,
793 .pro_handle_zcreq = ksocknal_handle_zcreq,
794 .pro_handle_zcack = ksocknal_handle_zcack,
795 .pro_match_tx = ksocknal_match_tx
/* Protocol V3.x dispatch table: shares the V2.x hello/pack handlers but
 * adds multi-cookie/range ZC-ACKs (queue_tx_zcack_v3) and V3-specific
 * connection matching (match_tx_v3). */
798 struct ksock_proto ksocknal_protocol_v3x = {
799 .pro_version = KSOCK_PROTO_V3,
800 .pro_send_hello = ksocknal_send_hello_v2,
801 .pro_recv_hello = ksocknal_recv_hello_v2,
802 .pro_pack = ksocknal_pack_msg_v2,
803 .pro_unpack = ksocknal_unpack_msg_v2,
804 .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
805 .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3,
806 .pro_handle_zcreq = ksocknal_handle_zcreq,
807 .pro_handle_zcack = ksocknal_handle_zcack,
808 .pro_match_tx = ksocknal_match_tx_v3