// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - support for alternate links postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */

#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>
#include <linux/ctype.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
#include "smc_stats.h"

static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */

struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
struct workqueue_struct	*smc_close_wq;	/* wq for close work */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);

static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}

static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};

int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);

void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);

struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);

static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
		smc->clcsock->file->private_data = smc->sk.sk_socket;
		smc->clcsock->file = NULL;
	}
}

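/* tear down an SMC socket: close the SMC connection (or undo the fallback
 * file/private_data changes), unhash the sock and, once the socket reached
 * SMC_CLOSED, release the internal CLC socket; called with the sock lock held
 */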
static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_CLOSED) {
			if (sk->sk_state != SMC_LISTEN &&
			    sk->sk_state != SMC_INIT)
				sock_put(sk); /* passive closing */
			if (sk->sk_state == SMC_LISTEN) {
				/* wake up clcsock accept */
				rc = kernel_sock_shutdown(smc->clcsock,
							  SHUT_RDWR);
			}
			sk->sk_state = SMC_CLOSED;
			sk->sk_state_change(sk);
		}
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}

static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int old_state, rc = 0;

	if (!sk)
		goto out;

	sock_hold(sk); /* sock_put below */
	smc = smc_sk(sk);

	old_state = sk->sk_state;

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && old_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);

	if (cancel_work_sync(&smc->connect_work))
		sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE &&
	    !smc->use_fallback)
		smc_close_active_abort(smc);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
out:
	return rc;
}

static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}

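/* allocate an SMC sock; the protocol value (SMCPROTO_SMC or SMCPROTO_SMC6)
 * selects the matching proto and thereby the v4 or v6 hash table
 */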
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);
	mutex_init(&smc->clcsock_release_lock);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;

	return sk;
}

static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}

static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}

/* register the new rmb on all links */
static int smcr_lgr_reg_rmbs(struct smc_link *link,
			     struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = link->lgr;
	int i, rc = 0;

	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	if (rc)
		return rc;
	/* protect against parallel smc_llc_cli_rkey_exchange() and
	 * parallel smcr_link_reg_rmb()
	 */
	mutex_lock(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
		if (rc)
			goto out;
	}

	/* exchange confirm_rkey msg with peer */
	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
	if (rc) {
		rc = SMC_CLC_DECL_ERR_REGRMB;
		goto out;
	}
	rmb_desc->is_conf_rkey = true;
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
	return rc;
}

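/* client: register the connection's rmb on the first link and perform the
 * LLC CONFIRM LINK handshake; a subsequent ADD LINK request from the server
 * may establish an optional second link
 */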
static int smcr_clnt_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	/* receive CONFIRM LINK request from server over RoCE fabric */
	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	/* optional 2nd link, receive ADD LINK request from server */
	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
			      SMC_LLC_ADD_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		if (rc == -EAGAIN)
			rc = 0; /* no DECLINE received, go with one link */
		return rc;
	}
	smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
	smc_llc_cli_add_link(link, qentry);
	return 0;
}

static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);

	smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}

static bool smc_isascii(char *hostname)
{
	int i;

	for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
		if (!isascii(hostname[i]))
			return false;
	return true;
}

static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);

	smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
	smc->conn.peer_token = clc->d0.token;
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
	if (clc->hdr.version > SMC_V1 &&
	    (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) {
		struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)clc;
		struct smc_clc_first_contact_ext *fce =
			(struct smc_clc_first_contact_ext *)
				(((u8 *)clc_v2) + sizeof(*clc_v2));

		memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid,
		       SMC_MAX_EID_LEN);
		smc->conn.lgr->peer_os = fce->os_type;
		smc->conn.lgr->peer_smc_release = fce->release;
		if (smc_isascii(fce->hostname))
			memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
			       SMC_MAX_HOSTNAME_LEN);
	}
}

static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->r0.qpn);
	memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->r0.psn);
	link->peer_mtu = clc->r0.qp_mtu;
}

static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
				       struct smc_stats_fback *fback_arr)
{
	int cnt;

	for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
		if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
			fback_arr[cnt].count++;
			break;
		}
		if (!fback_arr[cnt].fback_code) {
			fback_arr[cnt].fback_code = smc->fallback_rsn;
			fback_arr[cnt].count++;
			break;
		}
	}
}

static void smc_stat_fallback(struct smc_sock *smc)
{
	struct net *net = sock_net(&smc->sk);

	mutex_lock(&net->smc.mutex_fback_rsn);
	if (smc->listen_smc) {
		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
		net->smc.fback_rsn->srv_fback_cnt++;
	} else {
		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
		net->smc.fback_rsn->clnt_fback_cnt++;
	}
	mutex_unlock(&net->smc.mutex_fback_rsn);
}

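/* After a fallback, wakeups arrive on the clcsock's wait queue while user
 * space may still sleep on the smc socket's wait queue. The smc_fback_*
 * callbacks below intercept the clcsock wakeups and forward them to the
 * smc socket's wait queue with the matching poll flags.
 */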
/* must be called under rcu read lock */
static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key)
{
	struct socket_wq *wq;
	__poll_t flags;

	wq = rcu_dereference(smc->sk.sk_wq);
	if (!skwq_has_sleeper(wq))
		return;

	/* wake up smc sk->sk_wq */
	if (!key) {
		/* sk_state_change */
		wake_up_interruptible_all(&wq->wait);
	} else {
		flags = key_to_poll(key);
		if (flags & (EPOLLIN | EPOLLOUT))
			/* sk_data_ready or sk_write_space */
			wake_up_interruptible_sync_poll(&wq->wait, flags);
		else if (flags & EPOLLERR)
			/* sk_error_report */
			wake_up_interruptible_poll(&wq->wait, flags);
	}
}

struct smc_mark_woken {
	bool woken;
	void *key;
	wait_queue_entry_t wait_entry;
};

static int smc_fback_mark_woken(wait_queue_entry_t *wait,
				unsigned int mode, int sync, void *key)
{
	struct smc_mark_woken *mark =
		container_of(wait, struct smc_mark_woken, wait_entry);

	mark->woken = true;
	mark->key = key;
	return 0;
}

static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk,
				     void (*clcsock_callback)(struct sock *sk))
{
	struct smc_mark_woken mark = { .woken = false };
	struct socket_wq *wq;

	init_waitqueue_func_entry(&mark.wait_entry,
				  smc_fback_mark_woken);
	rcu_read_lock();
	wq = rcu_dereference(clcsk->sk_wq);
	if (!wq)
		goto out;
	add_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
	clcsock_callback(clcsk);
	remove_wait_queue(sk_sleep(clcsk), &mark.wait_entry);

	if (mark.woken)
		smc_fback_wakeup_waitqueue(smc, mark.key);
out:
	rcu_read_unlock();
}

static void smc_fback_state_change(struct sock *clcsk)
{
	struct smc_sock *smc =
		smc_clcsock_user_data(clcsk);

	if (!smc)
		return;
	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_state_change);
}

static void smc_fback_data_ready(struct sock *clcsk)
{
	struct smc_sock *smc =
		smc_clcsock_user_data(clcsk);

	if (!smc)
		return;
	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_data_ready);
}

static void smc_fback_write_space(struct sock *clcsk)
{
	struct smc_sock *smc =
		smc_clcsock_user_data(clcsk);

	if (!smc)
		return;
	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_write_space);
}

static void smc_fback_error_report(struct sock *clcsk)
{
	struct smc_sock *smc =
		smc_clcsock_user_data(clcsk);

	if (!smc)
		return;
	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_error_report);
}

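/* switch an SMC socket to fallback mode: from now on all traffic flows
 * through the internal CLC (TCP) socket; file and fasync state move to the
 * clcsock and its callbacks are redirected so wakeups reach the smc socket
 */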
static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
	struct sock *clcsk;
	int rc = 0;

	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		rc = -EBADF;
		goto out;
	}
	clcsk = smc->clcsock->sk;

	if (smc->use_fallback)
		goto out;
	smc->use_fallback = true;
	smc->fallback_rsn = reason_code;
	smc_stat_fallback(smc);
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
		smc->clcsock->wq.fasync_list =
			smc->sk.sk_socket->wq.fasync_list;

		/* There might be some wait entries remaining
		 * in smc sk->sk_wq and they should be woken up
		 * as clcsock's wait queue is woken up.
		 */
		smc->clcsk_state_change = clcsk->sk_state_change;
		smc->clcsk_data_ready = clcsk->sk_data_ready;
		smc->clcsk_write_space = clcsk->sk_write_space;
		smc->clcsk_error_report = clcsk->sk_error_report;

		clcsk->sk_state_change = smc_fback_state_change;
		clcsk->sk_data_ready = smc_fback_data_ready;
		clcsk->sk_write_space = smc_fback_write_space;
		clcsk->sk_error_report = smc_fback_error_report;

		smc->clcsock->sk->sk_user_data =
			(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
	}
out:
	mutex_unlock(&smc->clcsock_release_lock);
	return rc;
}

/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	struct net *net = sock_net(&smc->sk);
	int rc = 0;

	rc = smc_switch_to_fallback(smc, reason_code);
	if (rc) { /* fallback fails */
		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return rc;
	}
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}

/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
					u8 version)
{
	struct net *net = sock_net(&smc->sk);
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code, version);
		if (rc < 0) {
			this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}

static void smc_conn_abort(struct smc_sock *smc, int local_first)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;

	smc_conn_free(conn);
	if (local_first)
		smc_lgr_cleanup_early(lgr);
}

/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}

/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev[0])
		return SMC_CLC_DECL_NOSMCDDEV;
	else
		ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
	return 0;
}

/* is chid unique for the ism devices that are already determined? */
static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
					   int cnt)
{
	int i = (!ini->ism_dev[0]) ? 1 : 0;

	for (; i < cnt; i++)
		if (ini->ism_chid[i] == chid)
			return false;
	return true;
}

/* determine possible V2 ISM devices (either without PNETID or with PNETID plus
 * PNETID matching net_device)
 */
static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
				       struct smc_init_info *ini)
{
	int rc = SMC_CLC_DECL_NOSMCDDEV;
	struct smcd_dev *smcd;
	int i = 1;
	u16 chid;

	if (smcd_indicated(ini->smc_type_v1))
		rc = 0;		/* already initialized for V1 */
	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away || smcd == ini->ism_dev[0])
			continue;
		chid = smc_ism_get_chid(smcd);
		if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
			continue;
		if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
		    smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
			ini->ism_dev[i] = smcd;
			ini->ism_chid[i] = chid;
			ini->is_smcd = true;
			rc = 0;
			i++;
			if (i > SMC_MAX_ISM_DEVS)
				break;
		}
	}
	mutex_unlock(&smcd_dev_list.mutex);
	ini->ism_offered_cnt = i - 1;
	if (!ini->ism_dev[0] && !ini->ism_dev[1])
		ini->smcd_version = 0;

	return rc;
}

/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}

static int smc_find_proposal_devices(struct smc_sock *smc,
				     struct smc_init_info *ini)
{
	int rc = 0;

	/* check if there is an ism device available */
	if (ini->smcd_version & SMC_V1) {
		if (smc_find_ism_device(smc, ini) ||
		    smc_connect_ism_vlan_setup(smc, ini)) {
			if (ini->smc_type_v1 == SMC_TYPE_B)
				ini->smc_type_v1 = SMC_TYPE_R;
			else
				ini->smc_type_v1 = SMC_TYPE_N;
		} /* else ISM V1 is supported for this connection */
		if (smc_find_rdma_device(smc, ini)) {
			if (ini->smc_type_v1 == SMC_TYPE_B)
				ini->smc_type_v1 = SMC_TYPE_D;
			else
				ini->smc_type_v1 = SMC_TYPE_N;
		} /* else RDMA is supported for this connection */
	}
	if (smc_ism_is_v2_capable() && smc_find_ism_v2_device_clnt(smc, ini))
		ini->smc_type_v2 = SMC_TYPE_N;

	/* if neither ISM nor RDMA are supported, fallback */
	if (!smcr_indicated(ini->smc_type_v1) &&
	    ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
		rc = SMC_CLC_DECL_NOSMCDEV;

	return rc;
}

/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
					struct smc_init_info *ini)
{
	if (!smcd_indicated(ini->smc_type_v1))
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}

#define SMC_CLC_MAX_ACCEPT_LEN \
	(sizeof(struct smc_clc_msg_accept_confirm_v2) + \
	 sizeof(struct smc_clc_first_contact_ext) + \
	 sizeof(struct smc_clc_msg_trail))

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm_v2 *aclc2,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
				SMC_CLC_ACCEPT, CLC_WAIT_TIME);
}

/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	int i, reason_code = 0;
	struct smc_link *link;

	ini->is_smcd = false;
	ini->ib_lcl = &aclc->r0.lcl;
	ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}

	smc_conn_save_peer_info(smc, aclc);

	if (ini->first_contact_local) {
		link = smc->conn.lnk;
	} else {
		/* set link that was assigned by server */
		link = NULL;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *l = &smc->conn.lgr->lnk[i];

			if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
			    !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
				    SMC_GID_SIZE) &&
			    !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
				    sizeof(l->peer_mac))) {
				link = l;
				break;
			}
		}
		if (!link) {
			reason_code = SMC_CLC_DECL_NOSRVLINK;
			goto connect_abort;
		}
		smc_switch_link_and_count(&smc->conn, link);
	}

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false)) {
		reason_code = SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	if (ini->first_contact_local)
		smc_link_save_peer_info(link, aclc);

	if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto connect_abort;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (ini->first_contact_local) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto connect_abort;
		}
	} else {
		if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
			reason_code = SMC_CLC_DECL_ERR_REGRMB;
			goto connect_abort;
		}
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
					   SMC_V1);
	if (reason_code)
		goto connect_abort;

	smc_tx_init(smc);

	if (ini->first_contact_local) {
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_clnt_conf_first_link(smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
		if (reason_code)
			goto connect_abort;
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_conn_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_client_lgr_pending);
	smc->connect_nonblock = 0;

	return reason_code;
}

/* The server has chosen one of the proposed ISM devices for the communication.
 * Determine from the CHID of the received CLC ACCEPT the ISM device chosen.
 */
static int
smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc,
			       struct smc_init_info *ini)
{
	int i;

	for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
		if (ini->ism_chid[i] == ntohs(aclc->chid)) {
			ini->ism_selected = i;
			return 0;
		}
	}

	return -EPROTO;
}

/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	ini->is_smcd = true;
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;

	if (aclc->hdr.version == SMC_V2) {
		struct smc_clc_msg_accept_confirm_v2 *aclc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)aclc;

		rc = smc_v2_determine_accepted_chid(aclc_v2, ini);
		if (rc)
			return rc;
	}
	ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;

	/* there is only one lgr role for SMC-D; use server lock */
	mutex_lock(&smc_server_lgr_pending);
	rc = smc_conn_create(smc, ini);
	if (rc) {
		mutex_unlock(&smc_server_lgr_pending);
		return rc;
	}

	/* Create send and receive buffers */
	rc = smc_buf_create(smc, true);
	if (rc) {
		rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	smc_conn_save_peer_info(smc, aclc);
	smc_close_init(smc);
	smc_rx_init(smc);
	smc_tx_init(smc);

	rc = smc_clc_send_confirm(smc, ini->first_contact_local,
				  ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1);
	if (rc)
		goto connect_abort;
	mutex_unlock(&smc_server_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_conn_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_server_lgr_pending);
	smc->connect_nonblock = 0;

	return rc;
}

/* check if received accept type and version matches a proposed one */
static int smc_connect_check_aclc(struct smc_init_info *ini,
				  struct smc_clc_msg_accept_confirm *aclc)
{
	if ((aclc->hdr.typev1 == SMC_TYPE_R &&
	     !smcr_indicated(ini->smc_type_v1)) ||
	    (aclc->hdr.typev1 == SMC_TYPE_D &&
	     ((!smcd_indicated(ini->smc_type_v1) &&
	       !smcd_indicated(ini->smc_type_v2)) ||
	      (aclc->hdr.version == SMC_V1 &&
	       !smcd_indicated(ini->smc_type_v1)) ||
	      (aclc->hdr.version == SMC_V2 &&
	       !smcd_indicated(ini->smc_type_v2)))))
		return SMC_CLC_DECL_MODEUNSUPP;

	return 0;
}

/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
	struct smc_clc_msg_accept_confirm_v2 *aclc2;
	struct smc_clc_msg_accept_confirm *aclc;
	struct smc_init_info *ini = NULL;
	u8 *buf = NULL;
	int rc = 0;

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
						    version);

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
						    version);

	ini->smcd_version = SMC_V1;
	ini->smcd_version |= smc_ism_is_v2_capable() ? SMC_V2 : 0;
	ini->smc_type_v1 = SMC_TYPE_B;
	ini->smc_type_v2 = smc_ism_is_v2_capable() ? SMC_TYPE_D : SMC_TYPE_N;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
		ini->smcd_version &= ~SMC_V1;
		ini->smc_type_v1 = SMC_TYPE_N;
		if (!ini->smcd_version) {
			rc = SMC_CLC_DECL_GETVLANERR;
			goto fallback;
		}
	}

	rc = smc_find_proposal_devices(smc, ini);
	if (rc)
		goto fallback;

	buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto fallback;
	}
	aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf;
	aclc = (struct smc_clc_msg_accept_confirm *)aclc2;

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, aclc2, ini);
	if (rc)
		goto vlan_cleanup;

	/* check if smc modes and versions of CLC proposal and accept match */
	rc = smc_connect_check_aclc(ini, aclc);
	version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
	ini->smcd_version = version;
	if (rc)
		goto vlan_cleanup;

	/* depending on previous steps, connect using rdma or ism */
	if (aclc->hdr.typev1 == SMC_TYPE_R)
		rc = smc_connect_rdma(smc, aclc, ini);
	else if (aclc->hdr.typev1 == SMC_TYPE_D)
		rc = smc_connect_ism(smc, aclc, ini);
	if (rc)
		goto vlan_cleanup;

	SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
	kfree(ini);
	return 0;

vlan_cleanup:
	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
fallback:
	kfree(ini);
	return smc_connect_decline_fallback(smc, rc, version);
}

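/* worker performing the CLC handshake for a non-blocking connect; waits
 * first for the TCP handshake on the clcsock to complete
 */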
static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	long timeo = smc->sk.sk_sndtimeo;
	int rc = 0;

	if (!timeo)
		timeo = MAX_SCHEDULE_TIMEOUT;
	lock_sock(smc->clcsock->sk);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
	} else if ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
		if ((rc == -EPIPE) &&
		    ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
			rc = 0;
	}
	release_sock(smc->clcsock->sk);
	lock_sock(&smc->sk);
	if (rc != 0 || smc->sk.sk_err) {
		smc->sk.sk_state = SMC_CLOSED;
		if (rc == -EPIPE || rc == -EAGAIN)
			smc->sk.sk_err = EPIPE;
		else if (rc == -ECONNREFUSED)
			smc->sk.sk_err = ECONNREFUSED;
		else if (signal_pending(current))
			smc->sk.sk_err = -sock_intr_errno(timeo);
		sock_put(&smc->sk); /* passive closing */
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
		if (smc->sk.sk_err) {
			smc->sk.sk_state_change(&smc->sk);
		} else { /* allow polling before and after fallback decision */
			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
			smc->sk.sk_write_space(&smc->sk);
		}
	}
	release_sock(&smc->sk);
}

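/* connect the SMC socket; the CLC handshake runs synchronously or, with
 * O_NONBLOCK, asynchronously in smc_connect_work() after the TCP connect
 * of the internal CLC socket has been started
 */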
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (smc->connect_nonblock) {
		rc = -EALREADY;
		goto out;
	}
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc && rc != -EINPROGRESS)
		goto out;

	if (smc->use_fallback)
		goto out;
	sock_hold(&smc->sk); /* sock put in passive closing */
	if (flags & O_NONBLOCK) {
		if (queue_work(smc_hs_wq, &smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;
	} else {
		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}

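/* accept a pending connection on the internal CLC socket of a listening
 * SMC socket and allocate a new SMC sock for it; returns -EAGAIN when the
 * clcsock accept queue is empty
 */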
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc = -EINVAL;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	mutex_lock(&lsmc->clcsock_release_lock);
	if (lsmc->clcsock)
		rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
	mutex_unlock(&lsmc->clcsock_release_lock);
	lock_sock(lsk);
	if (rc < 0 && rc != -EAGAIN)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		new_sk->sk_prot->unhash(new_sk);
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	/* new clcsock has inherited the smc listen-specific sk_data_ready
	 * function; switch it back to the original sk_data_ready function
	 */
	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}

/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink () */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}

/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock) {
			sock_graft(new_sk, new_sock);
			if (isk->use_fallback) {
				smc_sk(new_sk)->clcsock->file = new_sock->file;
				isk->clcsock->file->private_data = isk->clcsock;
			}
		}
		return new_sk;
	}
	return NULL;
}

/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	sock_hold(sk); /* sock_put below */
	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	__smc_release(smc);
	release_sock(sk);
	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
}

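/* server: register the connection's rmb, run the LLC CONFIRM LINK handshake
 * on the first link and try to establish a second link afterwards
 */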
static int smcr_serv_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	/* initial contact - try to establish second link */
	smc_llc_srv_add_link(link);
	return 0;
}

/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	if (lsmc->sk.sk_state == SMC_LISTEN) {
		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
		release_sock(&lsmc->sk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}

/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;
	struct net *net = sock_net(newsmcsk);

	this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;

	smc_listen_out(new_smc);
}

/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_first, u8 version)
{
	/* RDMA setup failed, switch back to TCP */
	smc_conn_abort(new_smc, local_first);
	if (reason_code < 0 ||
	    smc_switch_to_fallback(new_smc, reason_code)) {
		/* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}

/* listen worker: version checking */
static int smc_listen_v2_check(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
	struct smc_clc_v2_extension *pclc_v2_ext;
	int rc = SMC_CLC_DECL_PEERNOSMC;

	ini->smc_type_v1 = pclc->hdr.typev1;
	ini->smc_type_v2 = pclc->hdr.typev2;
	ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0;
	if (pclc->hdr.version > SMC_V1)
		ini->smcd_version |=
				ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0;
	if (!(ini->smcd_version & SMC_V2)) {
		rc = SMC_CLC_DECL_PEERNOSMC;
		goto out;
	}
	if (!smc_ism_is_v2_capable()) {
		ini->smcd_version &= ~SMC_V2;
		rc = SMC_CLC_DECL_NOISM2SUPP;
		goto out;
	}
	pclc_v2_ext = smc_get_clc_v2_ext(pclc);
	if (!pclc_v2_ext) {
		ini->smcd_version &= ~SMC_V2;
		rc = SMC_CLC_DECL_NOV2EXT;
		goto out;
	}
	pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
	if (!pclc_smcd_v2_ext) {
		ini->smcd_version &= ~SMC_V2;
		rc = SMC_CLC_DECL_NOV2DEXT;
	}

out:
	if (!ini->smcd_version)
		return rc;

	return 0;
}

/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	if (pclc->hdr.typev1 == SMC_TYPE_N)
		return 0;
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}

/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false))
		return SMC_CLC_DECL_MEM;

	return 0;
}

/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_init_info *ini)
{
	int rc;

	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Create send and receive buffers */
	rc = smc_buf_create(new_smc, true);
	if (rc) {
		smc_conn_abort(new_smc, ini->first_contact_local);
		return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
					 SMC_CLC_DECL_MEM;
	}

	return 0;
}

static bool smc_is_already_selected(struct smcd_dev *smcd,
				    struct smc_init_info *ini,
				    int matches)
{
	int i;

	for (i = 0; i < matches; i++)
		if (smcd == ini->ism_dev[i])
			return true;

	return false;
}

/* check for ISM devices matching proposed ISM devices */
static void smc_check_ism_v2_match(struct smc_init_info *ini,
				   u16 proposed_chid, u64 proposed_gid,
				   unsigned int *matches)
{
	struct smcd_dev *smcd;

	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away)
			continue;
		if (smc_is_already_selected(smcd, ini, *matches))
			continue;
		if (smc_ism_get_chid(smcd) == proposed_chid &&
		    !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
			ini->ism_peer_gid[*matches] = proposed_gid;
			ini->ism_dev[*matches] = smcd;
			(*matches)++;
			break;
		}
	}
}

static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
{
	if (!ini->rc)
		ini->rc = rc;
}

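/* server: check the V2 ISM devices proposed by the client against the local
 * ISM devices; the system EID must match, and every matching device is
 * probed via smc_listen_ism_init() until a usable one is found
 */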
static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_v2_extension *smc_v2_ext;
	struct smc_clc_msg_smcd *pclc_smcd;
	unsigned int matches = 0;
	u8 smcd_version;
	u8 *eid = NULL;
	int i, rc;

	if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
		goto not_found;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	smc_v2_ext = smc_get_clc_v2_ext(pclc);
	smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
	if (!smcd_v2_ext ||
	    !smc_v2_ext->hdr.flag.seid) { /* no system EID support for SMCD */
		smc_find_ism_store_rc(SMC_CLC_DECL_NOSEID, ini);
		goto not_found;
	}

	mutex_lock(&smcd_dev_list.mutex);
	if (pclc_smcd->ism.chid)
		/* check for ISM device matching proposed native ISM device */
		smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
				       ntohll(pclc_smcd->ism.gid), &matches);
	for (i = 1; i <= smc_v2_ext->hdr.ism_gid_cnt; i++) {
		/* check for ISM devices matching proposed non-native ISM
		 * devices
		 */
		smc_check_ism_v2_match(ini,
				       ntohs(smcd_v2_ext->gidchid[i - 1].chid),
				       ntohll(smcd_v2_ext->gidchid[i - 1].gid),
				       &matches);
	}
	mutex_unlock(&smcd_dev_list.mutex);

	if (ini->ism_dev[0]) {
		smc_ism_get_system_eid(ini->ism_dev[0], &eid);
		if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN))
			goto not_found;
	} else {
		goto not_found;
	}

	/* separate - outside the smcd_dev_list.lock */
	smcd_version = ini->smcd_version;
	for (i = 0; i < matches; i++) {
		ini->smcd_version = SMC_V2;
		ini->is_smcd = true;
		ini->ism_selected = i;
		rc = smc_listen_ism_init(new_smc, ini);
		if (rc) {
			smc_find_ism_store_rc(rc, ini);
			/* try next active ISM device */
			continue;
		}
		return; /* matching and usable V2 ISM device found */
	}
	/* no V2 ISM device could be initialized */
	ini->smcd_version = smcd_version;	/* restore original value */

not_found:
	ini->smcd_version &= ~SMC_V2;
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}

static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
	int rc = 0;

	/* check if ISM V1 is available */
	if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
		goto not_found;
	ini->is_smcd = true; /* prepare ISM check */
	ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid);
	rc = smc_find_ism_device(new_smc, ini);
	if (rc)
		goto not_found;
	ini->ism_selected = 0;
	rc = smc_listen_ism_init(new_smc, ini);
	if (!rc)
		return;		/* V1 ISM device found */

not_found:
	smc_find_ism_store_rc(rc, ini);
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}

/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
{
	struct smc_connection *conn = &new_smc->conn;

	if (!local_first) {
		if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
			return SMC_CLC_DECL_ERR_REGRMB;
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}

static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	int rc;

	if (!smcr_indicated(ini->smc_type_v1))
		return SMC_CLC_DECL_NOSMCDEV;

	/* prepare RDMA check */
	ini->ib_lcl = &pclc->lcl;
	rc = smc_find_rdma_device(new_smc, ini);
	if (rc) {
		/* no RDMA device found */
		if (ini->smc_type_v1 == SMC_TYPE_B)
			/* neither ISM nor RDMA device found */
			rc = SMC_CLC_DECL_NOSMCDEV;
		return rc;
	}
	rc = smc_listen_rdma_init(new_smc, ini);
	if (rc)
		return rc;
	return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
}

/* determine the local device matching to proposal */
static int smc_listen_find_device(struct smc_sock *new_smc,
				  struct smc_clc_msg_proposal *pclc,
				  struct smc_init_info *ini)
{
	int rc;

	/* check for ISM device matching V2 proposed device */
	smc_find_ism_v2_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	if (!(ini->smcd_version & SMC_V1))
		return ini->rc ?: SMC_CLC_DECL_NOSMCD2DEV;

	/* check for matching IP prefix and subnet length */
	rc = smc_listen_prfx_check(new_smc, pclc);
	if (rc)
		return ini->rc ?: rc;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
		return ini->rc ?: SMC_CLC_DECL_GETVLANERR;

	/* check for ISM device matching V1 proposed device */
	smc_find_ism_v1_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	if (pclc->hdr.typev1 == SMC_TYPE_D)
		/* skip RDMA and decline */
		return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;

	/* check if RDMA is available */
	rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
	smc_find_ism_store_rc(rc, ini);

	return (!rc) ? 0 : ini->rc;
}

/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  bool local_first)
{
	struct smc_link *link = new_smc->conn.lnk;
	int reason_code = 0;

	if (local_first)
		smc_link_save_peer_info(link, cclc);

	if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
		return SMC_CLC_DECL_ERR_RTOK;

	if (local_first) {
		if (smc_ib_ready_link(link))
			return SMC_CLC_DECL_ERR_RDYLNK;
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_serv_conf_first_link(new_smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
	}
	return reason_code;
}

/* setup for connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm *cclc;
	struct smc_clc_msg_proposal_area *buf;
	struct smc_clc_msg_proposal *pclc;
	struct smc_init_info *ini = NULL;
	int rc = 0;

	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
		return smc_listen_out_err(new_smc);

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
		if (rc)
			smc_listen_out_err(new_smc);
		else
			smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}
	pclc = (struct smc_clc_msg_proposal *)buf;
	rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
	if (rc)
		goto out_decl;
	version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version;

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(new_smc)) {
		rc = SMC_CLC_DECL_IPSEC;
		goto out_decl;
	}

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}

	/* initial version checking */
	rc = smc_listen_v2_check(new_smc, pclc, ini);
	if (rc)
		goto out_decl;

	mutex_lock(&smc_server_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* determine ISM or RoCE device used for connection */
	rc = smc_listen_find_device(new_smc, pclc, ini);
	if (rc)
		goto out_unlock;

	/* send SMC Accept CLC message */
	rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
				 ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1);
	if (rc)
		goto out_unlock;

	/* SMC-D does not need this lock any more */
	if (ini->is_smcd)
		mutex_unlock(&smc_server_lgr_pending);

	/* receive SMC Confirm CLC message */
	memset(buf, 0, sizeof(*buf));
	cclc = (struct smc_clc_msg_accept_confirm *)buf;
	rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
	if (rc) {
		if (!ini->is_smcd)
			goto out_unlock;
		goto out_decl;
	}

	/* finish worker */
	if (!ini->is_smcd) {
		rc = smc_listen_rdma_finish(new_smc, cclc,
					    ini->first_contact_local);
		if (rc)
			goto out_unlock;
		mutex_unlock(&smc_server_lgr_pending);
	}
	smc_conn_save_peer_info(new_smc, cclc);
	smc_listen_out_connected(new_smc);
	SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
	goto out_free;

out_unlock:
	mutex_unlock(&smc_server_lgr_pending);
out_decl:
	smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
			   version);
out_free:
	kfree(ini);
	kfree(buf);
}

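/* worker draining the clcsock accept queue of a listening SMC socket; each
 * accepted clcsock gets its own SMC sock whose handshake then runs in
 * smc_listen_work()
 */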
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc) /* clcsock accept queue empty or error */
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
}

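/* sk_data_ready callback of a listening clcsock; runs the original callback
 * and schedules the tcp_listen worker to accept new connections
 */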
static void smc_clcsock_data_ready(struct sock *listen_clcsock)
{
	struct smc_sock *lsmc =
		smc_clcsock_user_data(listen_clcsock);

	if (!lsmc)
		return;
	lsmc->clcsk_data_ready(listen_clcsock);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
		if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
			sock_put(&lsmc->sk);
	}
}

static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
	    smc->connect_nonblock)
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	/* save original sk_data_ready function and establish
	 * smc-specific sk_data_ready function
	 */
	smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
	smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
	smc->clcsock->sk->sk_user_data =
		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
	rc = kernel_listen(smc->clcsock, backlog);
	if (rc) {
		smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
		goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;

out:
	release_sock(sk);
	return rc;
}

static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
					 MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}

static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);

	/* SMC does not support connect with fastopen */
	if (msg->msg_flags & MSG_FASTOPEN) {
		/* not connected yet, fallback */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
			if (rc)
				goto out;
		} else {
			rc = -EINVAL;
			goto out;
		}
	} else if ((sk->sk_state != SMC_ACTIVE) &&
		   (sk->sk_state != SMC_APPCLOSEWAIT1) &&
		   (sk->sk_state != SMC_INIT)) {
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	} else {
		rc = smc_tx_sendmsg(smc, msg, len);
		SMC_STAT_TX_PAYLOAD(smc, len, rc);
	}
out:
	release_sock(sk);
	return rc;
}

static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
		SMC_STAT_RX_PAYLOAD(smc, rc, rc);
	}

out:
	release_sock(sk);
	return rc;
}

static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}

static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	__poll_t mask = 0;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		} else if (smc->use_fallback) { /* as result of connect_work()*/
			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
							wait);
			sk->sk_err = smc->clcsock->sk->sk_err;
		} else {
			if ((sk->sk_state != SMC_INIT &&
			     atomic_read(&smc->conn.sndbuf_space)) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}

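/* shutdown an SMC socket: fallback sockets delegate to the clcsock; native
 * SMC sockets run the SMC close protocol first and shut down the clcsock
 * unless the close protocol still needs the peer's answer
 */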
static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	bool do_shutdown = true;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int old_state;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK) {
			sk->sk_state = SMC_CLOSED;
			sock_put(sk);
		}
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		old_state = sk->sk_state;
		rc = smc_close_active(smc);
		if (old_state == SMC_ACTIVE &&
		    sk->sk_state == SMC_PEERCLOSEWAIT1)
			do_shutdown = false;
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (do_shutdown && smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}

static int smc_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	if (level == SOL_TCP && optname == TCP_ULP)
		return -EOPNOTSUPP;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EBADF;
	}
	if (unlikely(!smc->clcsock->ops->setsockopt))
		rc = -EOPNOTSUPP;
	else
		rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
						   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk_error_report(sk);
	}
	mutex_unlock(&smc->clcsock_release_lock);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	lock_sock(sk);
	if (rc || smc->use_fallback)
		goto out;
	switch (optname) {
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
		} else {
			rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (val) {
				SMC_STAT_INC(smc, ndly_cnt);
				smc_tx_pending(&smc->conn);
				cancel_delayed_work(&smc->conn.tx_work);
			}
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (!val) {
				SMC_STAT_INC(smc, cork_cnt);
				smc_tx_pending(&smc->conn);
				cancel_delayed_work(&smc->conn.tx_work);
			}
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
out:
	release_sock(sk);
	return rc;
}

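/* Usage sketch (illustrative only, not part of this file; the fd name is
 * hypothetical): TCP-level options set on an SMC socket are first applied to
 * the internal CLC/TCP socket above, so they keep working after a fallback:
 *
 *	int one = 1;
 *
 *	setsockopt(smc_fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 */
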
static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sock->sk);
	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EBADF;
	}
	/* socket options apply to the CLC socket */
	if (unlikely(!smc->clcsock->ops->getsockopt)) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EOPNOTSUPP;
	}
	rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					   optval, optlen);
	mutex_unlock(&smc->clcsock_release_lock);
	return rc;
}

static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
					atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}

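/* Usage sketch (illustrative only, not part of this file; the fd name is
 * hypothetical): querying queue fill levels exactly as on a TCP socket:
 *
 *	int inq, outq;
 *
 *	ioctl(smc_fd, SIOCINQ, &inq);	// bytes waiting in the receive buffer
 *	ioctl(smc_fd, SIOCOUTQ, &outq);	// bytes queued: unsent plus unacked
 */
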
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback) {
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	} else {
		SMC_STAT_INC(smc, sendpage_cnt);
		rc = sock_no_sendpage(sock, page, offset, size, flags);
	}

out:
	return rc;
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		SMC_STAT_INC(smc, splice_cnt);
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}

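/* Usage sketch (illustrative only, not part of this file; the fd names are
 * hypothetical): moving received data into a pipe without a user-space copy.
 * A non-zero *ppos is rejected with -ESPIPE above, as for any socket:
 *
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	splice(smc_fd, NULL, pfd[1], NULL, 4096, SPLICE_F_NONBLOCK);
 */
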
/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};

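/* Usage sketch (illustrative only, not part of this file): because the ops
 * table above mirrors TCP, user space drives an SMC socket exactly like a
 * TCP one; only the family and protocol passed to socket() differ, while the
 * peer address is an ordinary AF_INET/AF_INET6 sockaddr:
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
 *
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin)); // sin as for TCP
 *	send(fd, buf, buflen, 0);
 */
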
static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}

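/* Note (added commentary, sketch only): the protocol constant chosen by user
 * space decides the family of the internal CLC socket created above:
 *
 *	socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);  // clcsock is PF_INET/TCP
 *	socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC6); // clcsock is PF_INET6/TCP
 */
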
static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_pnet_net_exit(net);
}

static __net_init int smc_net_stat_init(struct net *net)
{
	return smc_stats_init(net);
}

static void __net_exit smc_net_stat_exit(struct net *net)
{
	smc_stats_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};

static struct pernet_operations smc_net_stat_ops = {
	.init = smc_net_stat_init,
	.exit = smc_net_stat_exit,
};

static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	rc = register_pernet_subsys(&smc_net_stat_ops);
	if (rc)
		goto out_pernet_subsys;

	smc_ism_init();
	smc_clc_init();

	rc = smc_nl_init();
	if (rc)
		goto out_pernet_subsys_stat;

	rc = smc_pnet_init();
	if (rc)
		goto out_nl;

	rc = -ENOMEM;
	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
	if (!smc_hs_wq)
		goto out_pnet;

	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
	if (!smc_close_wq)
		goto out_alloc_hs_wq;

	rc = smc_core_init();
	if (rc) {
		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
		goto out_alloc_wqs;
	}

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_core:
	smc_core_exit();
out_alloc_wqs:
	destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
	destroy_workqueue(smc_hs_wq);
out_pnet:
	smc_pnet_exit();
out_nl:
	smc_nl_exit();
out_pernet_subsys_stat:
	unregister_pernet_subsys(&smc_net_stat_ops);
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}

static void __exit smc_exit(void)
{
	static_branch_disable(&tcp_have_smc);
	sock_unregister(PF_SMC);
	smc_core_exit();
	smc_ib_unregister_client();
	destroy_workqueue(smc_close_wq);
	destroy_workqueue(smc_hs_wq);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	smc_nl_exit();
	unregister_pernet_subsys(&smc_net_stat_ops);
	unregister_pernet_subsys(&smc_net_ops);
	rcu_barrier();
}

module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);