// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - support for alternate links postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */
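
/* Usage sketch (illustrative only, not part of this source file): from user
 * space an SMC socket is created like a TCP socket, only with a different
 * address family and protocol. The code below is hypothetical application
 * code, not kernel code:
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
 *	struct sockaddr_in sin = { .sin_family = AF_INET };
 *	// ... fill in sin.sin_addr and sin.sin_port, then:
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * If the peer turns out not to be SMC-capable, the connection transparently
 * falls back to plain TCP (see smc_connect_fallback() below).
 */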
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>
#include <linux/ctype.h>

#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc_close.h"

static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */

struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
struct workqueue_struct	*smc_close_wq;	/* wq for close work */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}

static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};

int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);

void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);
struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
		smc->clcsock->file->private_data = smc->sk.sk_socket;
		smc->clcsock->file = NULL;
	}
}

static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_CLOSED) {
			if (sk->sk_state != SMC_LISTEN &&
			    sk->sk_state != SMC_INIT)
				sock_put(sk); /* passive closing */
			if (sk->sk_state == SMC_LISTEN) {
				/* wake up clcsock accept */
				rc = kernel_sock_shutdown(smc->clcsock,
							  SHUT_RDWR);
			}
			sk->sk_state = SMC_CLOSED;
			sk->sk_state_change(sk);
		}
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int old_state, rc = 0;

	if (!sk)
		goto out;

	sock_hold(sk); /* sock_put below */
	smc = smc_sk(sk);

	old_state = sk->sk_state;

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && old_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);

	if (cancel_work_sync(&smc->connect_work))
		sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE &&
	    !smc->use_fallback)
		smc_close_active_abort(smc);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
out:
	return rc;
}

static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);
	mutex_init(&smc->clcsock_release_lock);

	return sk;
}

static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control over via setsockopt */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
/* register the new rmb on all links */
static int smcr_lgr_reg_rmbs(struct smc_link *link,
			     struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = link->lgr;
	int i, rc = 0;

	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	if (rc)
		return rc;
	/* protect against parallel smc_llc_cli_rkey_exchange() and
	 * parallel smcr_link_reg_rmb()
	 */
	mutex_lock(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
		if (rc)
			goto out;
	}

	/* exchange confirm_rkey msg with peer */
	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
	if (rc) {
		rc = SMC_CLC_DECL_ERR_RTOK;
		goto out;
	}
	rmb_desc->is_conf_rkey = true;
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
	return rc;
}

static int smcr_clnt_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	/* receive CONFIRM LINK request from server over RoCE fabric */
	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	/* optional 2nd link, receive ADD LINK request from server */
	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
			      SMC_LLC_ADD_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		if (rc == -EAGAIN)
			rc = 0; /* no DECLINE received, go with one link */
		return rc;
	}
	smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
	smc_llc_cli_add_link(link, qentry);
	return 0;
}
static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);

	smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}
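
/* Worked example for the offset math above (assuming a decompressed RMBE
 * size of 64KB): with peer_rmbe_idx == 2, writes into the peer's RMB start
 * at tx_off = 64KB * (2 - 1) = 64KB. Contrast smcd_conn_save_peer_info()
 * below, where the element index is used as-is
 * (tx_off = bufsize * peer_rmbe_idx) and the SMC-D CDC message header is
 * subtracted from the usable buffer size.
 */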
static bool smc_isascii(char *hostname)
{
	int i;

	for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
		if (!isascii(hostname[i]))
			return false;
	return true;
}

static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);

	smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
	smc->conn.peer_token = clc->d0.token;
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
	if (clc->hdr.version > SMC_V1 &&
	    (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) {
		struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)clc;
		struct smc_clc_first_contact_ext *fce =
			(struct smc_clc_first_contact_ext *)
				(((u8 *)clc_v2) + sizeof(*clc_v2));

		memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid,
		       SMC_MAX_EID_LEN);
		smc->conn.lgr->peer_os = fce->os_type;
		smc->conn.lgr->peer_smc_release = fce->release;
		if (smc_isascii(fce->hostname))
			memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
			       SMC_MAX_HOSTNAME_LEN);
	}
}

static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->r0.qpn);
	memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->r0.psn);
	link->peer_mtu = clc->r0.qp_mtu;
}
static void smc_switch_to_fallback(struct smc_sock *smc)
{
	wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
	wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk);
	unsigned long flags;

	smc->use_fallback = true;
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
		smc->clcsock->wq.fasync_list =
			smc->sk.sk_socket->wq.fasync_list;

		/* There may be some entries remaining in
		 * smc socket->wq, which should be removed
		 * to clcsock->wq during the fallback.
		 */
		spin_lock_irqsave(&smc_wait->lock, flags);
		spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
		list_splice_init(&smc_wait->head, &clc_wait->head);
		spin_unlock(&clc_wait->lock);
		spin_unlock_irqrestore(&smc_wait->lock, flags);
	}
}
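
/* A note on the locking in smc_switch_to_fallback() above: both wait-queue
 * locks belong to the same lock class, so the inner lock is taken with
 * spin_lock_nested(..., SINGLE_DEPTH_NESTING) to tell lockdep that this
 * ordered double acquisition (smc socket wait queue first, then the clcsock
 * one) is intentional.
 */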
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	smc_switch_to_fallback(smc);
	smc->fallback_rsn = reason_code;
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}

/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
					u8 version)
{
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code, version);
		if (rc < 0) {
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}

/* abort connecting */
static void smc_connect_abort(struct smc_sock *smc, int local_first)
{
	if (local_first)
		smc_lgr_cleanup_early(&smc->conn);
	else
		smc_conn_free(&smc->conn);
}
/* check if there is an RDMA device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}

/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev[0])
		return SMC_CLC_DECL_NOSMCDDEV;
	ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
	return 0;
}

/* is chid unique for the ism devices that are already determined? */
static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
					   int cnt)
{
	int i = (!ini->ism_dev[0]) ? 1 : 0;

	for (; i < cnt; i++)
		if (ini->ism_chid[i] == chid)
			return false;
	return true;
}
/* determine possible V2 ISM devices (either without PNETID or with PNETID plus
 * PNETID matching net_device)
 */
static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
				       struct smc_init_info *ini)
{
	int rc = SMC_CLC_DECL_NOSMCDDEV;
	struct smcd_dev *smcd;
	int i = 1;
	u16 chid;

	if (smcd_indicated(ini->smc_type_v1))
		rc = 0;		/* already initialized for V1 */
	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away || smcd == ini->ism_dev[0])
			continue;
		chid = smc_ism_get_chid(smcd);
		if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
			continue;
		if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
		    smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
			ini->ism_dev[i] = smcd;
			ini->ism_chid[i] = chid;
			rc = 0;
			i++;
			if (i > SMC_MAX_ISM_DEVS)
				break;
		}
	}
	mutex_unlock(&smcd_dev_list.mutex);
	ini->ism_offered_cnt = i - 1;
	if (!ini->ism_dev[0] && !ini->ism_dev[1])
		ini->smcd_version = 0;

	return rc;
}

/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}
static int smc_find_proposal_devices(struct smc_sock *smc,
				     struct smc_init_info *ini)
{
	int rc = 0;

	/* check if there is an ism device available */
	if (ini->smcd_version & SMC_V1) {
		if (smc_find_ism_device(smc, ini) ||
		    smc_connect_ism_vlan_setup(smc, ini)) {
			if (ini->smc_type_v1 == SMC_TYPE_B)
				ini->smc_type_v1 = SMC_TYPE_R;
			else
				ini->smc_type_v1 = SMC_TYPE_N;
		} /* else ISM V1 is supported for this connection */
		if (smc_find_rdma_device(smc, ini)) {
			if (ini->smc_type_v1 == SMC_TYPE_B)
				ini->smc_type_v1 = SMC_TYPE_D;
			else
				ini->smc_type_v1 = SMC_TYPE_N;
		} /* else RDMA is supported for this connection */
	}
	if (smc_ism_v2_capable && smc_find_ism_v2_device_clnt(smc, ini))
		ini->smc_type_v2 = SMC_TYPE_N;

	/* if neither ISM nor RDMA are supported, fallback */
	if (!smcr_indicated(ini->smc_type_v1) &&
	    ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
		rc = SMC_CLC_DECL_NOSMCDEV;

	return rc;
}

/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
					struct smc_init_info *ini)
{
	if (!smcd_indicated(ini->smc_type_v1))
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}
#define SMC_CLC_MAX_ACCEPT_LEN \
	(sizeof(struct smc_clc_msg_accept_confirm_v2) + \
	 sizeof(struct smc_clc_first_contact_ext) + \
	 sizeof(struct smc_clc_msg_trail))

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm_v2 *aclc2,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
				SMC_CLC_ACCEPT, CLC_WAIT_TIME);
}
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	int i, reason_code = 0;
	struct smc_link *link;

	ini->is_smcd = false;
	ini->ib_lcl = &aclc->r0.lcl;
	ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}

	smc_conn_save_peer_info(smc, aclc);

	if (ini->first_contact_local) {
		link = smc->conn.lnk;
	} else {
		/* set link that was assigned by server */
		link = NULL;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *l = &smc->conn.lgr->lnk[i];

			if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
			    !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
				    SMC_GID_SIZE) &&
			    !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
				    sizeof(l->peer_mac))) {
				link = l;
				break;
			}
		}
		if (!link) {
			reason_code = SMC_CLC_DECL_NOSRVLINK;
			goto connect_abort;
		}
		smc->conn.lnk = link;
	}

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false)) {
		reason_code = SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	if (ini->first_contact_local)
		smc_link_save_peer_info(link, aclc);

	if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto connect_abort;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (ini->first_contact_local) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto connect_abort;
		}
	} else {
		if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
			reason_code = SMC_CLC_DECL_ERR_REGRMB;
			goto connect_abort;
		}
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
					   SMC_V1);
	if (reason_code)
		goto connect_abort;

	smc_tx_init(smc);

	if (ini->first_contact_local) {
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_clnt_conf_first_link(smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
		if (reason_code)
			goto connect_abort;
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_connect_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_client_lgr_pending);
	smc->connect_nonblock = 0;

	return reason_code;
}
/* The server has chosen one of the proposed ISM devices for the communication.
 * Determine from the CHID of the received CLC ACCEPT the ISM device chosen.
 */
static int
smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc,
			       struct smc_init_info *ini)
{
	int i;

	for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
		if (ini->ism_chid[i] == ntohs(aclc->chid)) {
			ini->ism_selected = i;
			return 0;
		}
	}

	return -EPROTO;
}

/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	ini->is_smcd = true;
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;

	if (aclc->hdr.version == SMC_V2) {
		struct smc_clc_msg_accept_confirm_v2 *aclc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)aclc;

		rc = smc_v2_determine_accepted_chid(aclc_v2, ini);
		if (rc)
			return rc;
	}
	ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;

	/* there is only one lgr role for SMC-D; use server lock */
	mutex_lock(&smc_server_lgr_pending);
	rc = smc_conn_create(smc, ini);
	if (rc) {
		mutex_unlock(&smc_server_lgr_pending);
		return rc;
	}

	/* Create send and receive buffers */
	rc = smc_buf_create(smc, true);
	if (rc) {
		rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	smc_conn_save_peer_info(smc, aclc);

	rc = smc_clc_send_confirm(smc, ini->first_contact_local,
				  aclc->hdr.version);
	if (rc)
		goto connect_abort;
	mutex_unlock(&smc_server_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_connect_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_server_lgr_pending);
	smc->connect_nonblock = 0;

	return rc;
}
/* check if received accept type and version matches a proposed one */
static int smc_connect_check_aclc(struct smc_init_info *ini,
				  struct smc_clc_msg_accept_confirm *aclc)
{
	if ((aclc->hdr.typev1 == SMC_TYPE_R &&
	     !smcr_indicated(ini->smc_type_v1)) ||
	    (aclc->hdr.typev1 == SMC_TYPE_D &&
	     ((!smcd_indicated(ini->smc_type_v1) &&
	       !smcd_indicated(ini->smc_type_v2)) ||
	      (aclc->hdr.version == SMC_V1 &&
	       !smcd_indicated(ini->smc_type_v1)) ||
	      (aclc->hdr.version == SMC_V2 &&
	       !smcd_indicated(ini->smc_type_v2)))))
		return SMC_CLC_DECL_MODEUNSUPP;

	return 0;
}
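
/* The condition above in table form: an accept passes the check when
 *
 *	accept typev1	accept version	required in our proposal
 *	SMC_TYPE_R	any		smcr_indicated(smc_type_v1)
 *	SMC_TYPE_D	SMC_V1		smcd_indicated(smc_type_v1)
 *	SMC_TYPE_D	SMC_V2		smcd_indicated(smc_type_v2)
 *
 * anything else is answered with SMC_CLC_DECL_MODEUNSUPP.
 */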
/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	u8 version = smc_ism_v2_capable ? SMC_V2 : SMC_V1;
	struct smc_clc_msg_accept_confirm_v2 *aclc2;
	struct smc_clc_msg_accept_confirm *aclc;
	struct smc_init_info *ini = NULL;
	u8 *buf = NULL;
	int rc = 0;

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
						    version);

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
						    version);

	ini->smcd_version = SMC_V1;
	ini->smcd_version |= smc_ism_v2_capable ? SMC_V2 : 0;
	ini->smc_type_v1 = SMC_TYPE_B;
	ini->smc_type_v2 = smc_ism_v2_capable ? SMC_TYPE_D : SMC_TYPE_N;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
		ini->smcd_version &= ~SMC_V1;
		ini->smc_type_v1 = SMC_TYPE_N;
		if (!ini->smcd_version) {
			rc = SMC_CLC_DECL_GETVLANERR;
			goto fallback;
		}
	}

	rc = smc_find_proposal_devices(smc, ini);
	if (rc)
		goto fallback;

	buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto fallback;
	}
	aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf;
	aclc = (struct smc_clc_msg_accept_confirm *)aclc2;

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, aclc2, ini);
	if (rc)
		goto vlan_cleanup;

	/* check if smc modes and versions of CLC proposal and accept match */
	rc = smc_connect_check_aclc(ini, aclc);
	version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
	ini->smcd_version = version;
	if (rc)
		goto vlan_cleanup;

	/* depending on previous steps, connect using rdma or ism */
	if (aclc->hdr.typev1 == SMC_TYPE_R)
		rc = smc_connect_rdma(smc, aclc, ini);
	else if (aclc->hdr.typev1 == SMC_TYPE_D)
		rc = smc_connect_ism(smc, aclc, ini);
	if (rc)
		goto vlan_cleanup;

	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
	kfree(ini);
	return 0;

vlan_cleanup:
	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
fallback:
	kfree(ini);
	return smc_connect_decline_fallback(smc, rc, version);
}
static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	long timeo = smc->sk.sk_sndtimeo;
	int rc = 0;

	if (!timeo)
		timeo = MAX_SCHEDULE_TIMEOUT;
	lock_sock(smc->clcsock->sk);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
	} else if ((1 << smc->clcsock->sk->sk_state) &
		   (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
		if ((rc == -EPIPE) &&
		    ((1 << smc->clcsock->sk->sk_state) &
		     (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
			rc = 0;
	}
	release_sock(smc->clcsock->sk);
	lock_sock(&smc->sk);
	if (rc != 0 || smc->sk.sk_err) {
		smc->sk.sk_state = SMC_CLOSED;
		if (rc == -EPIPE || rc == -EAGAIN)
			smc->sk.sk_err = EPIPE;
		else if (rc == -ECONNREFUSED)
			smc->sk.sk_err = ECONNREFUSED;
		else if (signal_pending(current))
			smc->sk.sk_err = -sock_intr_errno(timeo);
		sock_put(&smc->sk); /* passive closing */
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
		if (smc->sk.sk_err) {
			smc->sk.sk_state_change(&smc->sk);
		} else { /* allow polling before and after fallback decision */
			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
			smc->sk.sk_write_space(&smc->sk);
		}
	}
	release_sock(&smc->sk);
}
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (smc->connect_nonblock) {
		rc = -EALREADY;
		goto out;
	}
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc && rc != -EINPROGRESS)
		goto out;

	if (smc->use_fallback)
		goto out;
	sock_hold(&smc->sk); /* sock put in passive closing */
	if (flags & O_NONBLOCK) {
		if (queue_work(smc_hs_wq, &smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;
	} else {
		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}
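
/* Non-blocking connect sketch (hypothetical user-space code): with O_NONBLOCK
 * the CLC handshake is finished asynchronously by smc_connect_work(), so the
 * caller waits for writability exactly as it would with TCP:
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *		poll(&pfd, 1, -1);	// woken via sk_write_space()
 *	}
 */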
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc = -EINVAL;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	mutex_lock(&lsmc->clcsock_release_lock);
	if (lsmc->clcsock)
		rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
	mutex_unlock(&lsmc->clcsock_release_lock);
	lock_sock(lsk);
	if (rc < 0 && rc != -EAGAIN)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		new_sk->sk_prot->unhash(new_sk);
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	/* new clcsock has inherited the smc listen-specific sk_data_ready
	 * function; switch it back to the original sk_data_ready function
	 */
	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}
/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink() */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}
/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock) {
			sock_graft(new_sk, new_sock);
			if (isk->use_fallback) {
				smc_sk(new_sk)->clcsock->file = new_sock->file;
				isk->clcsock->file->private_data = isk->clcsock;
			}
		}
		return new_sk;
	}
	return NULL;
}

/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	sock_hold(sk); /* sock_put below */
	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	__smc_release(smc);
	release_sock(sk);
	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
}
static int smcr_serv_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	/* initial contact - try to establish second link */
	smc_llc_srv_add_link(link);
	return 0;
}
/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	if (lsmc->sk.sk_state == SMC_LISTEN) {
		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
		release_sock(&lsmc->sk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}

/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;

	smc_listen_out(new_smc);
}

/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_first, u8 version)
{
	/* RDMA setup failed, switch back to TCP */
	if (local_first)
		smc_lgr_cleanup_early(&new_smc->conn);
	else
		smc_conn_free(&new_smc->conn);
	if (reason_code < 0) { /* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	smc_switch_to_fallback(new_smc);
	new_smc->fallback_rsn = reason_code;
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}
/* listen worker: version checking */
static int smc_listen_v2_check(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
	struct smc_clc_v2_extension *pclc_v2_ext;

	ini->smc_type_v1 = pclc->hdr.typev1;
	ini->smc_type_v2 = pclc->hdr.typev2;
	ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0;
	if (pclc->hdr.version > SMC_V1)
		ini->smcd_version |=
			ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0;
	if (!smc_ism_v2_capable) {
		ini->smcd_version &= ~SMC_V2;
		goto out;
	}
	pclc_v2_ext = smc_get_clc_v2_ext(pclc);
	if (!pclc_v2_ext) {
		ini->smcd_version &= ~SMC_V2;
		goto out;
	}
	pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
	if (!pclc_smcd_v2_ext)
		ini->smcd_version &= ~SMC_V2;

out:
	if (!ini->smcd_version) {
		if (pclc->hdr.typev1 == SMC_TYPE_B ||
		    pclc->hdr.typev2 == SMC_TYPE_B)
			return SMC_CLC_DECL_NOSMCDEV;
		if (pclc->hdr.typev1 == SMC_TYPE_D ||
		    pclc->hdr.typev2 == SMC_TYPE_D)
			return SMC_CLC_DECL_NOSMCDDEV;
		return SMC_CLC_DECL_NOSMCRDEV;
	}

	return 0;
}

/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	if (pclc->hdr.typev1 == SMC_TYPE_N)
		return 0;
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}
/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false))
		return SMC_CLC_DECL_MEM;

	return 0;
}

/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_init_info *ini)
{
	int rc;

	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Create send and receive buffers */
	rc = smc_buf_create(new_smc, true);
	if (rc) {
		if (ini->first_contact_local)
			smc_lgr_cleanup_early(&new_smc->conn);
		else
			smc_conn_free(&new_smc->conn);
		return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
					 SMC_CLC_DECL_MEM;
	}

	return 0;
}

static bool smc_is_already_selected(struct smcd_dev *smcd,
				    struct smc_init_info *ini,
				    int matches)
{
	int i;

	for (i = 0; i < matches; i++)
		if (smcd == ini->ism_dev[i])
			return true;

	return false;
}

/* check for ISM devices matching proposed ISM devices */
static void smc_check_ism_v2_match(struct smc_init_info *ini,
				   u16 proposed_chid, u64 proposed_gid,
				   unsigned int *matches)
{
	struct smcd_dev *smcd;

	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away)
			continue;
		if (smc_is_already_selected(smcd, ini, *matches))
			continue;
		if (smc_ism_get_chid(smcd) == proposed_chid &&
		    !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
			ini->ism_peer_gid[*matches] = proposed_gid;
			ini->ism_dev[*matches] = smcd;
			(*matches)++;
			break;
		}
	}
}
static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_v2_extension *smc_v2_ext;
	struct smc_clc_msg_smcd *pclc_smcd;
	unsigned int matches = 0;
	u8 smcd_version;
	u8 *eid = NULL;
	int i;

	if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
		goto not_found;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	smc_v2_ext = smc_get_clc_v2_ext(pclc);
	smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
	if (!smcd_v2_ext ||
	    !smc_v2_ext->hdr.flag.seid) /* no system EID support for SMCD */
		goto not_found;

	mutex_lock(&smcd_dev_list.mutex);
	if (pclc_smcd->ism.chid)
		/* check for ISM device matching proposed native ISM device */
		smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
				       ntohll(pclc_smcd->ism.gid), &matches);
	for (i = 1; i <= smc_v2_ext->hdr.ism_gid_cnt; i++) {
		/* check for ISM devices matching proposed non-native ISM
		 * devices
		 */
		smc_check_ism_v2_match(ini,
				       ntohs(smcd_v2_ext->gidchid[i - 1].chid),
				       ntohll(smcd_v2_ext->gidchid[i - 1].gid),
				       &matches);
	}
	mutex_unlock(&smcd_dev_list.mutex);

	if (ini->ism_dev[0]) {
		smc_ism_get_system_eid(ini->ism_dev[0], &eid);
		if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN))
			goto not_found;
	} else {
		goto not_found;
	}

	/* separate - outside the smcd_dev_list.lock */
	smcd_version = ini->smcd_version;
	for (i = 0; i < matches; i++) {
		ini->smcd_version = SMC_V2;
		ini->is_smcd = true;
		ini->ism_selected = i;
		if (smc_listen_ism_init(new_smc, ini))
			/* try next active ISM device */
			continue;
		return; /* matching and usable V2 ISM device found */
	}
	/* no V2 ISM device could be initialized */
	ini->smcd_version = smcd_version;	/* restore original value */

not_found:
	ini->smcd_version &= ~SMC_V2;
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}
static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);

	/* check if ISM V1 is available */
	if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
		goto not_found;
	ini->is_smcd = true; /* prepare ISM check */
	ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid);
	if (smc_find_ism_device(new_smc, ini))
		goto not_found;
	ini->ism_selected = 0;
	if (!smc_listen_ism_init(new_smc, ini))
		return;		/* V1 ISM device found */

not_found:
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}

/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
{
	struct smc_connection *conn = &new_smc->conn;

	if (!local_first) {
		if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
			return SMC_CLC_DECL_ERR_REGRMB;
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}

static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	int rc;

	if (!smcr_indicated(ini->smc_type_v1))
		return SMC_CLC_DECL_NOSMCDEV;

	/* prepare RDMA check */
	ini->ib_lcl = &pclc->lcl;
	rc = smc_find_rdma_device(new_smc, ini);
	if (rc) {
		/* no RDMA device found */
		if (ini->smc_type_v1 == SMC_TYPE_B)
			/* neither ISM nor RDMA device found */
			rc = SMC_CLC_DECL_NOSMCDEV;
		return rc;
	}
	rc = smc_listen_rdma_init(new_smc, ini);
	if (rc)
		return rc;
	return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
}
/* determine the local device matching to proposal */
static int smc_listen_find_device(struct smc_sock *new_smc,
				  struct smc_clc_msg_proposal *pclc,
				  struct smc_init_info *ini)
{
	int rc;

	/* check for ISM device matching V2 proposed device */
	smc_find_ism_v2_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	if (!(ini->smcd_version & SMC_V1))
		return SMC_CLC_DECL_NOSMCDEV;

	/* check for matching IP prefix and subnet length */
	rc = smc_listen_prfx_check(new_smc, pclc);
	if (rc)
		return rc;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
		return SMC_CLC_DECL_GETVLANERR;

	/* check for ISM device matching V1 proposed device */
	smc_find_ism_v1_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	if (pclc->hdr.typev1 == SMC_TYPE_D)
		return SMC_CLC_DECL_NOSMCDDEV; /* skip RDMA and decline */

	/* check if RDMA is available */
	return smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
}

/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  bool local_first)
{
	struct smc_link *link = new_smc->conn.lnk;
	int reason_code = 0;

	if (local_first)
		smc_link_save_peer_info(link, cclc);

	if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
		return SMC_CLC_DECL_ERR_RTOK;

	if (local_first) {
		if (smc_ib_ready_link(link))
			return SMC_CLC_DECL_ERR_RDYLNK;
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_serv_conf_first_link(new_smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
	}
	return reason_code;
}
/* setup for connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	u8 version = smc_ism_v2_capable ? SMC_V2 : SMC_V1;
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm *cclc;
	struct smc_clc_msg_proposal_area *buf;
	struct smc_clc_msg_proposal *pclc;
	struct smc_init_info *ini = NULL;
	int rc = 0;

	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
		return smc_listen_out_err(new_smc);

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		smc_switch_to_fallback(new_smc);
		new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
		smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}
	pclc = (struct smc_clc_msg_proposal *)buf;
	rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
	if (rc)
		goto out_decl;
	version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version;

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(new_smc)) {
		rc = SMC_CLC_DECL_IPSEC;
		goto out_decl;
	}

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}

	/* initial version checking */
	rc = smc_listen_v2_check(new_smc, pclc, ini);
	if (rc)
		goto out_decl;

	mutex_lock(&smc_server_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* determine ISM or RoCE device used for connection */
	rc = smc_listen_find_device(new_smc, pclc, ini);
	if (rc)
		goto out_unlock;

	/* send SMC Accept CLC message */
	rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
				 ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1);
	if (rc)
		goto out_unlock;

	/* SMC-D does not need this lock any more */
	if (ini->is_smcd)
		mutex_unlock(&smc_server_lgr_pending);

	/* receive SMC Confirm CLC message */
	memset(buf, 0, sizeof(*buf));
	cclc = (struct smc_clc_msg_accept_confirm *)buf;
	rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
	if (rc) {
		if (!ini->is_smcd)
			goto out_unlock;
		goto out_decl;
	}

	/* finish worker */
	if (!ini->is_smcd) {
		rc = smc_listen_rdma_finish(new_smc, cclc,
					    ini->first_contact_local);
		if (rc)
			goto out_unlock;
		mutex_unlock(&smc_server_lgr_pending);
	}
	smc_conn_save_peer_info(new_smc, cclc);
	smc_listen_out_connected(new_smc);
	goto out_free;

out_unlock:
	mutex_unlock(&smc_server_lgr_pending);
out_decl:
	smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
			   version);
out_free:
	kfree(ini);
	kfree(buf);
}
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc) /* clcsock accept queue empty or error */
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
}

static void smc_clcsock_data_ready(struct sock *listen_clcsock)
{
	struct smc_sock *lsmc;

	lsmc = (struct smc_sock *)
	       ((uintptr_t)listen_clcsock->sk_user_data & ~SK_USER_DATA_NOCOPY);
	if (!lsmc)
		return;
	lsmc->clcsk_data_ready(listen_clcsock);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
		if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
			sock_put(&lsmc->sk);
	}
}
static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
	    smc->connect_nonblock)
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	/* save original sk_data_ready function and establish
	 * smc-specific sk_data_ready function
	 */
	smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
	smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
	smc->clcsock->sk->sk_user_data =
		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
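	/* the SK_USER_DATA_NOCOPY tag set above is masked off again in
	 * smc_clcsock_data_ready() before the pointer is dereferenced
	 */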
	rc = kernel_listen(smc->clcsock, backlog);
	if (rc) {
		smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
		goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;

out:
	release_sock(sk);
	return rc;
}
static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
					 MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}
static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;

	if (msg->msg_flags & MSG_FASTOPEN) {
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
			goto out;
		}
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}
static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}

static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}
static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	__poll_t mask = 0;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		} else if (smc->use_fallback) { /* as result of connect_work() */
			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
							wait);
			sk->sk_err = smc->clcsock->sk->sk_err;
		} else {
			if ((sk->sk_state != SMC_INIT &&
			     atomic_read(&smc->conn.sndbuf_space)) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}
static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	bool do_shutdown = true;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int old_state;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK) {
			sk->sk_state = SMC_CLOSED;
			sock_put(sk);
		}
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		old_state = sk->sk_state;
		rc = smc_close_active(smc);
		if (old_state == SMC_ACTIVE &&
		    sk->sk_state == SMC_PEERCLOSEWAIT1)
			do_shutdown = false;
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (do_shutdown && smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;
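	/* i.e. SHUT_RD (0) -> RCV_SHUTDOWN (1), SHUT_WR (1) -> SEND_SHUTDOWN
	 * (2), SHUT_RDWR (2) -> RCV_SHUTDOWN | SEND_SHUTDOWN == SHUTDOWN_MASK
	 */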

out:
	release_sock(sk);
	return rc ? rc : rc1;
}
static int smc_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	if (level == SOL_TCP && optname == TCP_ULP)
		return -EOPNOTSUPP;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	if (unlikely(!smc->clcsock->ops->setsockopt))
		rc = -EOPNOTSUPP;
	else
		rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
						   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk->sk_error_report(sk);
	}

	if (optlen < sizeof(int))
		return -EINVAL;
	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	lock_sock(sk);
	if (rc || smc->use_fallback)
		goto out;
	switch (optname) {
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (val)
				mod_delayed_work(smc->conn.lgr->tx_wq,
						 &smc->conn.tx_work, 0);
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (!val)
				mod_delayed_work(smc->conn.lgr->tx_wq,
						 &smc->conn.tx_work, 0);
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
out:
	release_sock(sk);

	return rc;
}
static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	if (unlikely(!smc->clcsock->ops->getsockopt))
		return -EOPNOTSUPP;
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not sent + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
			       atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not sent only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	return rc;
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}
/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};
static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}

static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_pnet_net_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};
static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	rc = smc_pnet_init();
	if (rc)
		goto out_pernet_subsys;

	rc = -ENOMEM;
	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
	if (!smc_hs_wq)
		goto out_pnet;

	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
	if (!smc_close_wq)
		goto out_alloc_hs_wq;

	rc = smc_core_init();
	if (rc) {
		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
		goto out_alloc_wqs;
	}

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_core:
	smc_core_exit();
out_alloc_wqs:
	destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
	destroy_workqueue(smc_hs_wq);
out_pnet:
	smc_pnet_exit();
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}
static void __exit smc_exit(void)
{
	static_branch_disable(&tcp_have_smc);
	sock_unregister(PF_SMC);
	smc_core_exit();
	smc_ib_unregister_client();
	destroy_workqueue(smc_close_wq);
	destroy_workqueue(smc_hs_wq);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	unregister_pernet_subsys(&smc_net_ops);
	rcu_barrier();
}

module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);