// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Basic Transport Functions exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/smc.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_netlink.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)

struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

static void smc_link_down_work(struct work_struct *work);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

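/* bookkeeping helpers: track the number of SMC-R links per port of a
 * RoCE device
 */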
static void smc_ibdev_cnt_inc(struct smc_link *lnk)
{
	atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}

static void smc_ibdev_cnt_dec(struct smc_link *lnk)
{
	atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing)
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
						SMC_LGR_FREE_DELAY_CLNT :
						SMC_LGR_FREE_DELAY_SERV);
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @smc		connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

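/* Link balancing note: on the server, every other connection
 * (conns_num % 2) is placed on the next usable link instead of the first
 * one, so both active links end up carrying a similar number of
 * connections; a client's pick is only temporary, as the inline comment
 * below notes, since the server assigns the link that is actually used.
 */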
/* assign an SMC-R link to the connection */
static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
{
	enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
				       SMC_LNK_ACTIVE;
	int i, j;

	/* do link balancing */
	conn->lnk = NULL;	/* reset conn->lnk first */
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &conn->lgr->lnk[i];

		if (lnk->state != expected || lnk->link_is_asym)
			continue;
		if (conn->lgr->role == SMC_CLNT) {
			conn->lnk = lnk; /* temporary, SMC server assigns link*/
			break;
		}
		if (conn->lgr->conns_num % 2) {
			for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
				struct smc_link *lnk2;

				lnk2 = &conn->lgr->lnk[j];
				if (lnk2->state == expected &&
				    !lnk2->link_is_asym) {
					conn->lnk = lnk2;
					break;
				}
			}
		}
		if (!conn->lnk)
			conn->lnk = lnk;
		break;
	}
	if (!conn->lnk)
		return SMC_CLC_DECL_NOACTLINK;
	atomic_inc(&conn->lnk->conn_cnt);
	return 0;
}

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);
	int rc;

	if (!conn->lgr->is_smcd) {
		rc = smcr_lgr_conn_assign_link(conn, first);
		if (rc)
			return rc;
	}
	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
	return 0;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	if (conn->lnk)
		atomic_dec(&conn->lnk->conn_cnt);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!smc_conn_lgr_valid(conn))
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
}

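/* dump system information (SMC_NETLINK_GET_SYS_INFO): version, release,
 * ISM/SMC-R v2 capability, local hostname and - if ISM v2 capable - the
 * system EID
 */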
int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	char hostname[SMC_MAX_HOSTNAME_LEN + 1];
	char smc_seid[SMC_MAX_EID_LEN + 1];
	struct nlattr *attrs;
	u8 *seid = NULL;
	u8 *host = NULL;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_SYS_INFO);
	if (!nlh)
		return -EMSGSIZE;
	if (cb_ctx->pos[0])
		goto errmsg;
	attrs = nla_nest_start(skb, SMC_GEN_SYS_INFO);
	if (!attrs)
		goto errmsg;
	if (nla_put_u8(skb, SMC_NLA_SYS_VER, SMC_V2))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_REL, SMC_RELEASE))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable()))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_IS_SMCR_V2, true))
		goto errattr;
	smc_clc_get_hostname(&host);
	if (host) {
		memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN);
		hostname[SMC_MAX_HOSTNAME_LEN] = 0;
		if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
			goto errattr;
	}
	if (smc_ism_is_v2_capable()) {
		smc_ism_get_system_eid(&seid);
		memcpy(smc_seid, seid, SMC_MAX_EID_LEN);
		smc_seid[SMC_MAX_EID_LEN] = 0;
		if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
			goto errattr;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	cb_ctx->pos[0] = 1;
	return skb->len;

errattr:
	nla_nest_cancel(skb, attrs);
errmsg:
	genlmsg_cancel(skb, nlh);
	return skb->len;
}

/* Fill SMC_NLA_LGR_D_V2_COMMON/SMC_NLA_LGR_R_V2_COMMON nested attributes */
static int smc_nl_fill_lgr_v2_common(struct smc_link_group *lgr,
				     struct sk_buff *skb,
				     struct netlink_callback *cb,
				     struct nlattr *v2_attrs)
{
	char smc_host[SMC_MAX_HOSTNAME_LEN + 1];
	char smc_eid[SMC_MAX_EID_LEN + 1];

	if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
		goto errv2attr;
	memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
	smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
		goto errv2attr;
	memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
	smc_eid[SMC_MAX_EID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
		goto errv2attr;

	nla_nest_end(skb, v2_attrs);
	return 0;

errv2attr:
	nla_nest_cancel(skb, v2_attrs);
	return -EMSGSIZE;
}

static int smc_nl_fill_smcr_lgr_v2(struct smc_link_group *lgr,
				   struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct nlattr *v2_attrs;

	v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2);
	if (!v2_attrs)
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_DIRECT, !lgr->uses_gateway))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_MAX_CONNS, lgr->max_conns))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_MAX_LINKS, lgr->max_links))
		goto errv2attr;

	nla_nest_end(skb, v2_attrs);
	return 0;

errv2attr:
	nla_nest_cancel(skb, v2_attrs);
errattr:
	return -EMSGSIZE;
}

static int smc_nl_fill_lgr(struct smc_link_group *lgr,
			   struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	char smc_target[SMC_MAX_PNETID_LEN + 1];
	struct nlattr *attrs, *v2_attrs;

	attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR);
	if (!attrs)
		goto errout;

	if (nla_put_u32(skb, SMC_NLA_LGR_R_ID, *((u32 *)&lgr->id)))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_R_CONNS_NUM, lgr->conns_num))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_ROLE, lgr->role))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_TYPE, lgr->type))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_BUF_TYPE, lgr->buf_type))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_R_NET_COOKIE,
			      lgr->net->net_cookie, SMC_NLA_LGR_R_PAD))
		goto errattr;
	memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN);
	smc_target[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
		goto errattr;
	if (lgr->smc_version > SMC_V1) {
		v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2_COMMON);
		if (!v2_attrs)
			goto errattr;
		if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
			goto errattr;
		if (smc_nl_fill_smcr_lgr_v2(lgr, skb, cb))
			goto errattr;
	}

	nla_nest_end(skb, attrs);
	return 0;
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	return -EMSGSIZE;
}

static int smc_nl_fill_lgr_link(struct smc_link_group *lgr,
				struct smc_link *link,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	char smc_ibname[IB_DEVICE_NAME_MAX];
	u8 smc_gid_target[41];
	struct nlattr *attrs;
	u32 link_uid = 0;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LINK_SMCR);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_LINK_SMCR);
	if (!attrs)
		goto errout;

	if (nla_put_u8(skb, SMC_NLA_LINK_ID, link->link_id))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_STATE, link->state))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_CONN_CNT,
			atomic_read(&link->conn_cnt)))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LINK_IB_PORT, link->ibport))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_NET_DEV, link->ndev_ifidx))
		goto errattr;
	snprintf(smc_ibname, sizeof(smc_ibname), "%s", link->ibname);
	if (nla_put_string(skb, SMC_NLA_LINK_IB_DEV, smc_ibname))
		goto errattr;
	memcpy(&link_uid, link->link_uid, sizeof(link_uid));
	if (nla_put_u32(skb, SMC_NLA_LINK_UID, link_uid))
		goto errattr;
	memcpy(&link_uid, link->peer_link_uid, sizeof(link_uid));
	if (nla_put_u32(skb, SMC_NLA_LINK_PEER_UID, link_uid))
		goto errattr;
	memset(smc_gid_target, 0, sizeof(smc_gid_target));
	smc_gid_be16_convert(smc_gid_target, link->gid);
	if (nla_put_string(skb, SMC_NLA_LINK_GID, smc_gid_target))
		goto errattr;
	memset(smc_gid_target, 0, sizeof(smc_gid_target));
	smc_gid_be16_convert(smc_gid_target, link->peer_gid);
	if (nla_put_string(skb, SMC_NLA_LINK_PEER_GID, smc_gid_target))
		goto errattr;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static int smc_nl_handle_lgr(struct smc_link_group *lgr,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     bool list_links)
{
	void *nlh;
	int i;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LGR_SMCR);
	if (!nlh)
		goto errmsg;
	if (smc_nl_fill_lgr(lgr, skb, cb))
		goto errout;

	genlmsg_end(skb, nlh);
	if (!list_links)
		goto out;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_usable(&lgr->lnk[i]))
			continue;
		if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb))
			goto errout;
	}
out:
	return 0;

errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static void smc_nl_fill_lgr_list(struct smc_lgr_list *smc_lgr,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 bool list_links)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_link_group *lgr;
	int snum = cb_ctx->pos[0];
	int num = 0;

	spin_lock_bh(&smc_lgr->lock);
	list_for_each_entry(lgr, &smc_lgr->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_lgr(lgr, skb, cb, list_links))
			goto errout;
next:
		num++;
	}
errout:
	spin_unlock_bh(&smc_lgr->lock);
	cb_ctx->pos[0] = num;
}

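/* fill a netlink message with the attributes of one SMC-D link group */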
static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct smcd_dev *smcd = lgr->smcd;
	struct smcd_gid smcd_gid;
	struct nlattr *attrs;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LGR_SMCD);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCD);
	if (!attrs)
		goto errout;

	if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id)))
		goto errattr;
	smcd->ops->get_local_gid(smcd, &smcd_gid);
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID,
			      smcd_gid.gid, SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_EXT_GID,
			      smcd_gid.gid_ext, SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_GID, lgr->peer_gid.gid,
			      SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_EXT_GID,
			      lgr->peer_gid.gid_ext, SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_D_VLAN_ID, lgr->vlan_id))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_D_CONNS_NUM, lgr->conns_num))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
		goto errattr;
	memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
		goto errattr;
	if (lgr->smc_version > SMC_V1) {
		struct nlattr *v2_attrs;

		v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_D_V2_COMMON);
		if (!v2_attrs)
			goto errattr;
		if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
			goto errattr;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static int smc_nl_handle_smcd_lgr(struct smcd_dev *dev,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_link_group *lgr;
	int snum = cb_ctx->pos[1];
	int rc = 0, num = 0;

	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry(lgr, &dev->lgr_list, list) {
		if (!lgr->is_smcd)
			continue;
		if (num < snum)
			goto next;
		rc = smc_nl_fill_smcd_lgr(lgr, skb, cb);
		if (rc)
			goto errout;
next:
		num++;
	}
errout:
	spin_unlock_bh(&dev->lgr_lock);
	cb_ctx->pos[1] = num;
	return rc;
}

static int smc_nl_fill_smcd_dev(struct smcd_dev_list *dev_list,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smcd_dev *smcd_dev;
	int snum = cb_ctx->pos[0];
	int rc = 0, num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcd_dev, &dev_list->list, list) {
		if (list_empty(&smcd_dev->lgr_list))
			continue;
		if (num < snum)
			goto next;
		rc = smc_nl_handle_smcd_lgr(smcd_dev, skb, cb);
		if (rc)
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
	return rc;
}

int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
{
	bool list_links = false;

	smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
	return skb->len;
}

int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	bool list_links = true;

	smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
	return skb->len;
}

int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_fill_smcd_dev(&smcd_dev_list, skb, cb);
	return skb->len;
}

void smc_lgr_cleanup_early(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	if (!lgr)
		return;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, true);
}

static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (smc_link_sendable(lnk))
			lnk->state = SMC_LNK_INACTIVE;
	}
	wake_up_all(&lgr->llc_msg_waiter);
	wake_up_all(&lgr->llc_flow_waiter);
}

static void smc_lgr_free(struct smc_link_group *lgr);

static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && !lgr->terminating)
		smc_llc_send_link_delete_all(lgr, true,
					     SMC_LLC_DEL_PROG_INIT_TERM);
	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	if (!lgr->is_smcd)
		smcr_lgr_link_deactivate_all(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	__smc_lgr_terminate(lgr, true);
}

/* return next unique link id for the lgr */
static u8 smcr_next_link_id(struct smc_link_group *lgr)
{
	u8 link_id;
	int i;

	while (1) {
again:
		link_id = ++lgr->next_link_id;
		if (!link_id)	/* skip zero as link_id */
			link_id = ++lgr->next_link_id;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (smc_link_usable(&lgr->lnk[i]) &&
			    lgr->lnk[i].link_id == link_id)
				goto again;
		}
		break;
	}
	return link_id;
}

static void smcr_copy_dev_info_to_link(struct smc_link *link)
{
	struct smc_ib_device *smcibdev = link->smcibdev;

	snprintf(link->ibname, sizeof(link->ibname), "%s",
		 smcibdev->ibdev->name);
	link->ndev_ifidx = smcibdev->ndev_ifidx[link->ibport - 1];
}

int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
		   u8 link_idx, struct smc_init_info *ini)
{
	struct smc_ib_device *smcibdev;
	u8 rndvec[3];
	int rc;

	if (lgr->smc_version == SMC_V2) {
		lnk->smcibdev = ini->smcrv2.ib_dev_v2;
		lnk->ibport = ini->smcrv2.ib_port_v2;
	} else {
		lnk->smcibdev = ini->ib_dev;
		lnk->ibport = ini->ib_port;
	}
	get_device(&lnk->smcibdev->ibdev->dev);
	atomic_inc(&lnk->smcibdev->lnk_cnt);
	refcount_set(&lnk->refcnt, 1); /* link refcnt is set to 1 */
	lnk->clearing = 0;
	lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu;
	lnk->link_id = smcr_next_link_id(lgr);
	lnk->lgr = lgr;
	smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */
	lnk->link_idx = link_idx;
	lnk->wr_rx_id_compl = 0;
	smc_ibdev_cnt_inc(lnk);
	smcr_copy_dev_info_to_link(lnk);
	atomic_set(&lnk->conn_cnt, 0);
	smc_llc_link_set_uid(lnk);
	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
	if (!lnk->smcibdev->initialized) {
		rc = (int)smc_ib_setup_per_ibdev(lnk->smcibdev);
		if (rc)
			goto out;
	}
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
		(rndvec[2] << 16);
	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
				  ini->vlan_id, lnk->gid, &lnk->sgid_index,
				  lgr->smc_version == SMC_V2 ?
						  &ini->smcrv2 : NULL);
	if (rc)
		goto out;
	rc = smc_llc_link_init(lnk);
	if (rc)
		goto out;
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto clear_llc_lnk;
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	lnk->state = SMC_LNK_ACTIVATING;
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk, false);
out:
	smc_ibdev_cnt_dec(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
	smc_lgr_put(lgr); /* lgr_hold above */
	return rc;
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smcd_dev *smcd;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 link_idx;
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
				     ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
				     SMC_LGR_ID_SIZE, &lgr->id);
	if (!lgr->tx_wq) {
		rc = -ENOMEM;
		goto free_lgr;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	refcount_set(&lgr->refcnt, 1); /* set lgr refcnt to 1 */
	init_rwsem(&lgr->sndbufs_lock);
	init_rwsem(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	lgr->next_link_id = 0;
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		smcd = ini->ism_dev[ini->ism_selected];
		get_device(smcd->ops->get_dev(smcd));
		lgr->peer_gid.gid =
			ini->ism_peer_gid[ini->ism_selected].gid;
		lgr->peer_gid.gid_ext =
			ini->ism_peer_gid[ini->ism_selected].gid_ext;
		lgr->smcd = ini->ism_dev[ini->ism_selected];
		lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->smc_version = ini->smcd_version;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		struct smc_ib_device *ibdev;
		int ibport;

		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		lgr->smc_version = ini->smcr_version;
		memcpy(lgr->peer_systemid, ini->peer_systemid,
		       SMC_SYSTEMID_LEN);
		if (lgr->smc_version == SMC_V2) {
			ibdev = ini->smcrv2.ib_dev_v2;
			ibport = ini->smcrv2.ib_port_v2;
			lgr->saddr = ini->smcrv2.saddr;
			lgr->uses_gateway = ini->smcrv2.uses_gateway;
			memcpy(lgr->nexthop_mac, ini->smcrv2.nexthop_mac,
			       ETH_ALEN);
			lgr->max_conns = ini->max_conns;
			lgr->max_links = ini->max_links;
		} else {
			ibdev = ini->ib_dev;
			ibport = ini->ib_port;
			lgr->max_conns = SMC_CONN_PER_LGR_MAX;
			lgr->max_links = SMC_LINKS_ADD_LNK_MAX;
		}
		memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1],
		       SMC_MAX_PNETID_LEN);
		rc = smc_wr_alloc_lgr_mem(lgr);
		if (rc)
			goto free_wq;
		smc_llc_lgr_init(lgr, smc);

		link_idx = SMC_SINGLE_LINK;
		lnk = &lgr->lnk[link_idx];
		rc = smcr_link_init(lgr, lnk, link_idx, ini);
		if (rc) {
			smc_wr_free_lgr_mem(lgr);
			goto free_wq;
		}
		lgr->net = smc_ib_net(lnk->smcibdev);
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		lgr->buf_type = lgr->net->smc.sysctl_smcr_buf_type;
		atomic_inc(&lgr_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add_tail(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

free_wq:
	destroy_workqueue(lgr->tx_wq);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}

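/* compute the free space in the peer's RMB, i.e. how many bytes may still
 * be sent without overrunning the receiver
 */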
static int smc_write_space(struct smc_connection *conn)
{
	int buffer_len = conn->peer_rmbe_size;
	union smc_host_cursor prod;
	union smc_host_cursor cons;
	int space;

	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
	/* determine rx_buf space */
	space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
	return space;
}

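/* after a link switch, rewind this connection's tx cursors to the state
 * last confirmed by the peer so pending data is written again over the
 * new link, and send a CDC validation message if the socket is still live
 */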
static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
			     struct smc_wr_buf *wr_buf)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons, fin;
	int rc = 0;
	int diff;

	smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
	smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
	/* set prod cursor to old state, enforce tx_rdma_writes() */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
		/* cons cursor advanced more than fin, and prod was set
		 * equal to fin above, so now prod is smaller than cons.
		 * Fix that.
		 */
		diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_sent, diff);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_fin, diff);

		smp_mb__before_atomic();
		atomic_add(diff, &conn->sndbuf_space);
		smp_mb__after_atomic();

		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl.prod, diff);
		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl_fin, diff);
	}
	/* recalculate, value is used by tx_rdma_writes() */
	atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));

	if (smc->sk.sk_state != SMC_INIT &&
	    smc->sk.sk_state != SMC_CLOSED) {
		rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
		if (!rc) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
			smc->sk.sk_data_ready(&smc->sk);
		}
	} else {
		smc_wr_tx_put_slot(conn->lnk,
				   (struct smc_wr_tx_pend_priv *)pend);
	}
	return rc;
}

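/* move a connection's link reference counting from the old link to to_lnk */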
void smc_switch_link_and_count(struct smc_connection *conn,
			       struct smc_link *to_lnk)
{
	atomic_dec(&conn->lnk->conn_cnt);
	/* link_hold in smc_conn_create() */
	smcr_link_put(conn->lnk);
	conn->lnk = to_lnk;
	atomic_inc(&conn->lnk->conn_cnt);
	/* link_put in smc_conn_free() */
	smcr_link_hold(conn->lnk);
}

struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err)
{
	struct smc_link *to_lnk = NULL;
	struct smc_cdc_tx_pend *pend;
	struct smc_connection *conn;
	struct smc_wr_buf *wr_buf;
	struct smc_sock *smc;
	struct rb_node *node;
	int i, rc = 0;

	/* link is inactive, wake up tx waiters */
	smc_wr_wakeup_tx_wait(from_lnk);

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
			continue;
		if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
		    from_lnk->ibport == lgr->lnk[i].ibport) {
			continue;
		}
		to_lnk = &lgr->lnk[i];
		break;
	}
	if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
		smc_lgr_terminate_sched(lgr);
		return NULL;
	}
again:
	read_lock_bh(&lgr->conns_lock);
	for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
		conn = rb_entry(node, struct smc_connection, alert_node);
		if (conn->lnk != from_lnk)
			continue;
		smc = container_of(conn, struct smc_sock, conn);
		/* conn->lnk not yet set in SMC_INIT state */
		if (smc->sk.sk_state == SMC_INIT)
			continue;
		if (smc->sk.sk_state == SMC_CLOSED ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
		    smc->sk.sk_state == SMC_PROCESSABORT) {
			spin_lock_bh(&conn->send_lock);
			smc_switch_link_and_count(conn, to_lnk);
			spin_unlock_bh(&conn->send_lock);
			continue;
		}
		sock_hold(&smc->sk);
		read_unlock_bh(&lgr->conns_lock);
		/* pre-fetch buffer outside of send_lock, might sleep */
		rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
		if (rc)
			goto err_out;
		/* avoid race with smcr_tx_sndbuf_nonempty() */
		spin_lock_bh(&conn->send_lock);
		smc_switch_link_and_count(conn, to_lnk);
		rc = smc_switch_cursor(smc, pend, wr_buf);
		spin_unlock_bh(&conn->send_lock);
		sock_put(&smc->sk);
		if (rc)
			goto err_out;
		goto again;
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_wr_tx_link_put(to_lnk);
	return to_lnk;

err_out:
	smcr_link_down_cond_sched(to_lnk);
	smc_wr_tx_link_put(to_lnk);
	return NULL;
}

static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
			   struct smc_link_group *lgr)
{
	struct rw_semaphore *lock;	/* lock buffer list */
	int rc;

	if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
		/* unregister rmb with peer */
		rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
		if (!rc) {
			/* protect against smc_llc_cli_rkey_exchange() */
			down_read(&lgr->llc_conf_mutex);
			smc_llc_do_delete_rkey(lgr, buf_desc);
			buf_desc->is_conf_rkey = false;
			up_read(&lgr->llc_conf_mutex);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
		}
	}

	if (buf_desc->is_reg_err) {
		/* buf registration failed, reuse not possible */
		lock = is_rmb ? &lgr->rmbs_lock :
				&lgr->sndbufs_lock;
		down_write(lock);
		list_del(&buf_desc->list);
		up_write(lock);

		smc_buf_free(lgr, is_rmb, buf_desc);
	} else {
		/* memzero_explicit provides potential memory barrier semantics */
		memzero_explicit(buf_desc->cpu_addr, buf_desc->len);
		WRITE_ONCE(buf_desc->used, 0);
	}
}

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc) {
		if (!lgr->is_smcd && conn->sndbuf_desc->is_vm) {
			smcr_buf_unuse(conn->sndbuf_desc, false, lgr);
		} else {
			memzero_explicit(conn->sndbuf_desc->cpu_addr, conn->sndbuf_desc->len);
			WRITE_ONCE(conn->sndbuf_desc->used, 0);
		}
	}
	if (conn->rmb_desc) {
		if (!lgr->is_smcd) {
			smcr_buf_unuse(conn->rmb_desc, true, lgr);
		} else {
			memzero_explicit(conn->rmb_desc->cpu_addr,
					 conn->rmb_desc->len + sizeof(struct smcd_cdc_msg));
			WRITE_ONCE(conn->rmb_desc->used, 0);
		}
	}
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr || conn->freed)
		/* Connection has never been registered in a
		 * link group, or has already been freed.
		 */
		return;

	conn->freed = 1;
	if (!smc_conn_lgr_valid(conn))
		/* Connection has already unregistered from
		 * link group.
		 */
		goto lgr_put;

	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_wait_pend_tx_wr(conn);
		if (current_work() != &conn->abort_work)
			cancel_work_sync(&conn->abort_work);
	}
	if (!list_empty(&lgr->list)) {
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
		smc_lgr_unregister_conn(conn);
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
lgr_put:
	if (!lgr->is_smcd)
		smcr_link_put(conn->lnk); /* link_hold in smc_conn_create() */
	smc_lgr_put(lgr); /* lgr_hold in smc_conn_create() */
}

/* unregister a link from a buf_desc */
static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
				struct smc_link *lnk)
{
	if (is_rmb || buf_desc->is_vm)
		buf_desc->is_reg_mr[lnk->link_idx] = false;
	if (!buf_desc->is_map_ib[lnk->link_idx])
		return;

	if ((is_rmb || buf_desc->is_vm) &&
	    buf_desc->mr[lnk->link_idx]) {
		smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]);
		buf_desc->mr[lnk->link_idx] = NULL;
	}
	if (is_rmb)
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
	else
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);

	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	buf_desc->is_map_ib[lnk->link_idx] = false;
}

/* unmap all buffers of lgr for a deleted link */
static void smcr_buf_unmap_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		down_write(&lgr->rmbs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
			smcr_buf_unmap_link(buf_desc, true, lnk);
		up_write(&lgr->rmbs_lock);

		down_write(&lgr->sndbufs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
					 list)
			smcr_buf_unmap_link(buf_desc, false, lnk);
		up_write(&lgr->sndbufs_lock);
	}
}

static void smcr_rtoken_clear_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		lgr->rtokens[i][lnk->link_idx].rkey = 0;
		lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
	}
}

static void __smcr_link_clear(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_ib_device *smcibdev;

	smc_wr_free_link_mem(lnk);
	smc_ibdev_cnt_dec(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
	smc_lgr_put(lgr); /* lgr_hold in smcr_link_init() */
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_clear(struct smc_link *lnk, bool log)
{
	if (!lnk->lgr || lnk->clearing ||
	    lnk->state == SMC_LNK_UNUSED)
		return;
	lnk->clearing = 1;
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk, log);
	smcr_buf_unmap_lgr(lnk);
	smcr_rtoken_clear_link(lnk);
	smc_ib_modify_qp_error(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smcr_link_put(lnk); /* theoretically last link_put */
}

void smcr_link_hold(struct smc_link *lnk)
{
	refcount_inc(&lnk->refcnt);
}

void smcr_link_put(struct smc_link *lnk)
{
	if (refcount_dec_and_test(&lnk->refcnt))
		__smcr_link_clear(lnk);
}

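/* free an SMC-R buffer: unmap it from all links, then release the
 * physically or virtually contiguous memory behind it
 */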
static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);

	if (!buf_desc->is_vm && buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	else if (buf_desc->is_vm && buf_desc->cpu_addr)
		vfree(buf_desc->cpu_addr);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

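/* free one direction of a link group's buffers: either all sndbufs or all
 * RMBs, depending on is_rmb
 */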
static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* won't be freed until no one accesses the lgr anymore */
static void __smc_lgr_free(struct smc_link_group *lgr)
{
	smc_lgr_free_bufs(lgr);
	if (lgr->is_smcd) {
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
	} else {
		smc_wr_free_lgr_mem(lgr);
		if (!atomic_dec_return(&lgr_cnt))
			wake_up(&lgrs_deleted);
	}
	kfree(lgr);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	int i;

	if (!lgr->is_smcd) {
		down_write(&lgr->llc_conf_mutex);
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state != SMC_LNK_UNUSED)
				smcr_link_clear(&lgr->lnk[i], false);
		}
		up_write(&lgr->llc_conf_mutex);
		smc_llc_lgr_clear(lgr);
	}

	destroy_workqueue(lgr->tx_wq);
	if (lgr->is_smcd) {
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(lgr->smcd->ops->get_dev(lgr->smcd));
	}
	smc_lgr_put(lgr); /* theoretically last lgr_put */
}

void smc_lgr_hold(struct smc_link_group *lgr)
{
	refcount_inc(&lgr->refcnt);
}

void smc_lgr_put(struct smc_link_group *lgr)
{
	if (refcount_dec_and_test(&lgr->refcnt))
		__smc_lgr_free(lgr);
}

static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
	conn->killed = 1;
	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
	} else {
		smc_cdc_wait_pend_tx_wr(conn);
	}
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}

static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
	} else {
		u32 rsn = lgr->llc_termination_rsn;

		if (!rsn)
			rsn = SMC_LLC_DEL_PROG_INIT_TERM;
		smc_llc_send_link_delete_all(lgr, false, rsn);
		smcr_lgr_link_deactivate_all(lgr);
	}
}

/* terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	/* cancel free_work sync, will terminate when lgr->freeing is set */
	cancel_delayed_work(&lgr->free_work);
	lgr->terminating = 1;

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn, soft);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_lgr_cleanup(lgr);
	smc_lgr_free(lgr);
}

/* unlink link group and schedule termination */
void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	list_del_init(&lgr->list);
	lgr->freeing = 1;
	spin_unlock_bh(lgr_lock);
	schedule_work(&lgr->terminate_work);
}

/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid,
			unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid->gid ||
		     (lgr->peer_gid.gid == peer_gid->gid &&
		      (!smc_ism_is_virtual(dev) ||
		       lgr->peer_gid.gid_ext == peer_gid->gid_ext))) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			if (peer_gid->gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
			list_move(&lgr->list, &lgr_free_list);
			lgr->freeing = 1;
		}
	}
	spin_unlock_bh(&dev->lgr_lock);

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		schedule_work(&lgr->terminate_work);
	}
}

/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smcd->lgr_lock);
	list_splice_init(&smcd->lgr_list, &lgr_free_list);
	list_for_each_entry(lgr, &lgr_free_list, list)
		lgr->freeing = 1;
	spin_unlock_bh(&smcd->lgr_lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (atomic_read(&smcd->lgr_cnt))
		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}

/* Called when an SMCR device is removed or the smc module is unloaded.
 * If smcibdev is given, all SMCR link groups using this device are terminated.
 * If smcibdev is NULL, all SMCR link groups are terminated.
 */
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);
	int i;

	spin_lock_bh(&smc_lgr_list.lock);
	if (!smcibdev) {
		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
		list_for_each_entry(lgr, &lgr_free_list, list)
			lgr->freeing = 1;
	} else {
		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
				if (lgr->lnk[i].smcibdev == smcibdev)
					smcr_link_down_cond_sched(&lgr->lnk[i]);
			}
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
		__smc_lgr_terminate(lgr, false);
	}

	if (smcibdev) {
		if (atomic_read(&smcibdev->lnk_cnt))
			wait_event(smcibdev->lnks_deleted,
				   !atomic_read(&smcibdev->lnk_cnt));
	} else {
		if (atomic_read(&lgr_cnt))
			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
	}
}

/* set new lgr type and clear all asymmetric link tagging */
void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
{
	char *lgr_type = "";
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		if (smc_link_usable(&lgr->lnk[i]))
			lgr->lnk[i].link_is_asym = false;
	if (lgr->type == new_type)
		return;
	lgr->type = new_type;

	switch (lgr->type) {
	case SMC_LGR_NONE:
		lgr_type = "NONE";
		break;
	case SMC_LGR_SINGLE:
		lgr_type = "SINGLE";
		break;
	case SMC_LGR_SYMMETRIC:
		lgr_type = "SYMMETRIC";
		break;
	case SMC_LGR_ASYMMETRIC_PEER:
		lgr_type = "ASYMMETRIC_PEER";
		break;
	case SMC_LGR_ASYMMETRIC_LOCAL:
		lgr_type = "ASYMMETRIC_LOCAL";
		break;
	}
	pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu state changed: "
			    "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
			    lgr->net->net_cookie, lgr_type, lgr->pnet_id);
}

/* set new lgr type and tag a link as asymmetric */
void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
			    enum smc_lgr_type new_type, int asym_lnk_idx)
{
	smcr_lgr_set_type(lgr, new_type);
	lgr->lnk[asym_lnk_idx].link_is_asym = true;
}

/* abort connection, abort_work scheduled from tasklet context */
static void smc_conn_abort_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(work,
						   struct smc_connection,
						   abort_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	lock_sock(&smc->sk);
	smc_conn_kill(conn, true);
	release_sock(&smc->sk);
	sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
}

void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *n;

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
		struct smc_link *link;

		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN) ||
		    lgr->type == SMC_LGR_SYMMETRIC ||
		    lgr->type == SMC_LGR_ASYMMETRIC_PEER ||
		    !rdma_dev_access_netns(smcibdev->ibdev, lgr->net))
			continue;

		if (lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1)
			continue;

		/* trigger local add link processing */
		link = smc_llc_usable_link(lgr);
		if (link)
			smc_llc_add_link_local(link);
	}
	spin_unlock_bh(&smc_lgr_list.lock);
}

/* link is down - switch connections to alternate link,
 * must be called under lgr->llc_conf_mutex lock
 */
static void smcr_link_down(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_link *to_lnk;
	int del_link_id;

	if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
		return;

	to_lnk = smc_switch_conns(lgr, lnk, true);
	if (!to_lnk) { /* no backup link available */
		smcr_link_clear(lnk, true);
		return;
	}
	smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	del_link_id = lnk->link_id;

	if (lgr->role == SMC_SERV) {
		/* trigger local delete link processing */
		smc_llc_srv_delete_link_local(to_lnk, del_link_id);
	} else {
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* another llc task is ongoing */
			up_write(&lgr->llc_conf_mutex);
			wait_event_timeout(lgr->llc_flow_waiter,
					   (list_empty(&lgr->list) ||
					    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
					   SMC_LLC_WAIT_TIME);
			down_write(&lgr->llc_conf_mutex);
		}
		if (!list_empty(&lgr->list)) {
			smc_llc_send_delete_link(to_lnk, del_link_id,
						 SMC_LLC_REQ, true,
						 SMC_LLC_DEL_LOST_PATH);
			smcr_link_clear(lnk, true);
		}
		wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
	}
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_down_cond(struct smc_link *lnk)
{
	if (smc_link_downing(&lnk->state)) {
		trace_smcr_link_down(lnk, __builtin_return_address(0));
		smcr_link_down(lnk);
	}
}

/* will get the lgr->llc_conf_mutex lock */
void smcr_link_down_cond_sched(struct smc_link *lnk)
{
	if (smc_link_downing(&lnk->state)) {
		trace_smcr_link_down(lnk, __builtin_return_address(0));
		schedule_work(&lnk->link_down_wrk);
	}
}

void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *n;
	int i;

	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN))
			continue; /* lgr is not affected */
		if (list_empty(&lgr->list))
			continue;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *lnk = &lgr->lnk[i];

			if (smc_link_usable(lnk) &&
			    lnk->smcibdev == smcibdev && lnk->ibport == ibport)
				smcr_link_down_cond_sched(lnk);
		}
	}
}

static void smc_link_down_work(struct work_struct *work)
{
	struct smc_link *link = container_of(work, struct smc_link,
					     link_down_wrk);
	struct smc_link_group *lgr = link->lgr;

	if (list_empty(&lgr->list))
		return;
	wake_up_all(&lgr->llc_msg_waiter);
	down_write(&lgr->llc_conf_mutex);
	smcr_link_down(link);
	up_write(&lgr->llc_conf_mutex);
}

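/* netdev_walk_all_lower_dev() callback: report the vlan id of a vlan lower
 * device; returning non-zero stops the walk once one is found
 */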
static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
				  struct netdev_nested_priv *priv)
{
	unsigned short *vlan_id = (unsigned short *)priv->data;

	if (is_vlan_dev(lower_dev)) {
		*vlan_id = vlan_dev_vlan_id(lower_dev);
		return 1;
	}

	return 0;
}

/* Determine vlan of internal TCP socket. */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct netdev_nested_priv priv;
	struct net_device *ndev;
	int rc = 0;

	ini->vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		ini->vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

	priv.data = (void *)&ini->vlan_id;
	rtnl_lock();
	netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
	rtnl_unlock();

out_rel:
	dst_release(dst);
out:
	return rc;
}

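/* check if an existing link group matches a proposed SMC-R connection:
 * same peer system and role, and an active link to the peer's gid/qp that
 * is reachable from this netns
 */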
static bool smcr_lgr_match(struct smc_link_group *lgr, u8 smcr_version,
			   u8 peer_systemid[],
			   u8 peer_gid[],
			   u8 peer_mac_v1[],
			   enum smc_lgr_role role, u32 clcqpn,
			   struct net *net)
{
	struct smc_link *lnk;
	int i;

	if (memcmp(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN) ||
	    lgr->role != role)
		return false;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		lnk = &lgr->lnk[i];

		if (!smc_link_active(lnk))
			continue;
		/* use verbs API to check netns, instead of lgr->net */
		if (!rdma_dev_access_netns(lnk->smcibdev->ibdev, net))
			return false;
		if ((lgr->role == SMC_SERV || lnk->peer_qpn == clcqpn) &&
		    !memcmp(lnk->peer_gid, peer_gid, SMC_GID_SIZE) &&
		    (smcr_version == SMC_V2 ||
		     !memcmp(lnk->peer_mac, peer_mac_v1, ETH_ALEN)))
			return true;
	}
	return false;
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev,
			   struct smcd_gid *peer_gid)
{
	if (lgr->peer_gid.gid != peer_gid->gid ||
	    lgr->smcd != smcismdev)
		return false;

	if (smc_ism_is_virtual(smcismdev) &&
	    lgr->peer_gid.gid_ext != peer_gid->gid_ext)
		return false;

	return true;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct net *net = sock_net(&smc->sk);
	struct list_head *lgr_list;
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
	spinlock_t *lgr_lock;
	int rc = 0;

	lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
				  &smc_lgr_list.list;
	lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
				  &smc_lgr_list.lock;
	ini->first_contact_local = 1;
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	if (role == SMC_CLNT && ini->first_contact_peer)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(lgr_lock);
	list_for_each_entry(lgr, lgr_list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
				    &ini->ism_peer_gid[ini->ism_selected]) :
		     smcr_lgr_match(lgr, ini->smcr_version,
				    ini->peer_systemid,
				    ini->peer_gid, ini->peer_mac, role,
				    ini->ib_clcqpn, net)) &&
		    !lgr->sync_err &&
		    (ini->smcd_version == SMC_V2 ||
		     lgr->vlan_id == ini->vlan_id) &&
		    (role == SMC_CLNT || ini->is_smcd ||
		     (lgr->conns_num < lgr->max_conns &&
		      !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) {
			/* link group found */
			ini->first_contact_local = 0;
			conn->lgr = lgr;
			rc = smc_lgr_register_conn(conn, false);
			write_unlock_bh(&lgr->conns_lock);
			if (!rc && delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(lgr_lock);
	if (rc)
		return rc;

	if (role == SMC_CLNT && !ini->first_contact_peer &&
	    ini->first_contact_local) {
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
		return SMC_CLC_DECL_SYNCERR;
	}

create:
	if (ini->first_contact_local) {
		rc = smc_lgr_create(smc, ini);
		if (rc)
			goto out;
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
		rc = smc_lgr_register_conn(conn, true);
		write_unlock_bh(&lgr->conns_lock);
		if (rc) {
			smc_lgr_cleanup_early(lgr);
			goto out;
		}
	}
	smc_lgr_hold(conn->lgr); /* lgr_put in smc_conn_free() */
	if (!conn->lgr->is_smcd)
		smcr_link_hold(conn->lnk); /* link_put in smc_conn_free() */
	conn->freed = 0;
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
	init_waitqueue_head(&conn->cdc_pend_tx_wq);
	INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
	if (ini->is_smcd) {
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	} else {
		conn->rx_off = 0;
	}
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc;
}

#define SMCD_DMBE_SIZES		6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
#define SMCR_RMBE_SIZES		5 /* 0 -> 16KB, 1 -> 32KB, .. 5 -> 512KB */

/* convert the RMB size into the compressed notation (minimum 16K, see
 * SMCD/R_DMBE_SIZES).
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
{
	const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE;
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		size = SMC_BUF_MIN_SIZE;

	size = (size - 1) >> 14; /* convert to 16K multiple */
	compressed = min_t(u8, ilog2(size) + 1,
			   is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES);

	if (!is_smcd && is_rmb)
		/* RMBs are backed by & limited to max size of scatterlists */
		compressed = min_t(u8, compressed, ilog2(max_scat >> 14));

	return compressed;
}

/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}

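/* Worked example: a request for 100000 bytes is first reduced to 16K
 * multiples ((100000 - 1) >> 14 == 6) and then rounded up to the next
 * power of 2 (ilog2(6) + 1 == 3); smc_uncompress_bufsize(3) yields
 * 1 << (3 + 14) == 128KB, the smallest supported size covering the request.
 */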
/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     struct rw_semaphore *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	down_read(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			up_read(lock);
			return buf_slot;
		}
	}
	up_read(lock);
	return NULL;
}

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

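/* e.g. a 64KB RMB yields max(65536 / 10, SOCK_MIN_SNDBUF / 2), i.e. a
 * 6553 byte update limit on typical builds where SOCK_MIN_SNDBUF / 2 is
 * smaller than the 10% share
 */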
/* map a buf to a link */
static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
			     struct smc_link *lnk)
{
	int rc, i, nents, offset, buf_size, size, access_flags;
	struct scatterlist *sg;
	void *buf;

	if (buf_desc->is_map_ib[lnk->link_idx])
		return 0;

	if (buf_desc->is_vm) {
		buf = buf_desc->cpu_addr;
		buf_size = buf_desc->len;
		offset = offset_in_page(buf_desc->cpu_addr);
		nents = PAGE_ALIGN(buf_size + offset) / PAGE_SIZE;
	} else {
		nents = 1;
	}

	rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL);
	if (rc)
		return rc;

	if (buf_desc->is_vm) {
		/* virtually contiguous buffer */
		for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) {
			size = min_t(int, PAGE_SIZE - offset, buf_size);
			sg_set_page(sg, vmalloc_to_page(buf), size, offset);
			buf += size / sizeof(*buf);
			buf_size -= size;
			offset = 0;
		}
	} else {
		/* physically contiguous buffer */
		sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
			   buf_desc->cpu_addr, buf_desc->len);
	}

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != nents) {
		rc = -EAGAIN;
		goto free_table;
	}

	buf_desc->is_dma_need_sync |=
		smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx;

	if (is_rmb || buf_desc->is_vm) {
		/* create a new memory region for the RMB or vzalloced sndbuf */
		access_flags = is_rmb ?
			       IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			       IB_ACCESS_LOCAL_WRITE;

		rc = smc_ib_get_memory_region(lnk->roce_pd, access_flags,
					      buf_desc, lnk->link_idx);
		if (rc)
			goto buf_unmap;
		smc_ib_sync_sg_for_device(lnk, buf_desc,
					  is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
	buf_desc->is_map_ib[lnk->link_idx] = true;
	return 0;

buf_unmap:
	smc_ib_buf_unmap_sg(lnk, buf_desc,
			    is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
free_table:
	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	return rc;
}

/* register a new buf on IB device, rmb or vzalloced sndbuf
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
{
	if (list_empty(&link->lgr->list))
		return -ENOLINK;
	if (!buf_desc->is_reg_mr[link->link_idx]) {
		/* register memory region for new buf */
		if (buf_desc->is_vm)
			buf_desc->mr[link->link_idx]->iova =
				(uintptr_t)buf_desc->cpu_addr;
		if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) {
			buf_desc->is_reg_err = true;
			return -EFAULT;
		}
		buf_desc->is_reg_mr[link->link_idx] = true;
	}
	return 0;
}

static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock,
			     struct list_head *lst, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf;
	int rc = 0;

	down_write(lock);
	list_for_each_entry_safe(buf_desc, bf, lst, list) {
		if (!buf_desc->used)
			continue;
		rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
		if (rc)
			goto out;
	}
out:
	up_write(lock);
	return rc;
}

/* map all used buffers of lgr for a new link */
int smcr_buf_map_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i, rc = 0;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
				       &lgr->rmbs[i], true);
		if (rc)
			return rc;
		rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
				       &lgr->sndbufs[i], false);
		if (rc)
			return rc;
	}
	return 0;
}

/* register all used buffers of lgr for a new link,
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_buf_reg_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i, rc = 0;

	/* reg all RMBs for a new link */
	down_write(&lgr->rmbs_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
			if (!buf_desc->used)
				continue;
			rc = smcr_link_reg_buf(lnk, buf_desc);
			if (rc) {
				up_write(&lgr->rmbs_lock);
				return rc;
			}
		}
	}
	up_write(&lgr->rmbs_lock);

	if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
		return rc;

	/* reg all vzalloced sndbufs for a new link */
	down_write(&lgr->sndbufs_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) {
			if (!buf_desc->used || !buf_desc->is_vm)
				continue;
			rc = smcr_link_reg_buf(lnk, buf_desc);
			if (rc) {
				up_write(&lgr->sndbufs_lock);
				return rc;
			}
		}
	}
	up_write(&lgr->sndbufs_lock);
	return rc;
}

static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	switch (lgr->buf_type) {
	case SMCR_PHYS_CONT_BUFS:
	case SMCR_MIXED_BUFS:
		buf_desc->order = get_order(bufsize);
		buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
					      __GFP_NOMEMALLOC | __GFP_COMP |
					      __GFP_NORETRY | __GFP_ZERO,
					      buf_desc->order);
		if (buf_desc->pages) {
			buf_desc->cpu_addr =
				(void *)page_address(buf_desc->pages);
			buf_desc->len = bufsize;
			buf_desc->is_vm = false;
			break;
		}
		if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
			goto out;
		fallthrough;	// try virtually contiguous buf
	case SMCR_VIRT_CONT_BUFS:
		buf_desc->order = get_order(bufsize);
		buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order);
		if (!buf_desc->cpu_addr)
			goto out;
		buf_desc->pages = NULL;
		buf_desc->len = bufsize;
		buf_desc->is_vm = true;
		break;
	}
	return buf_desc;

out:
	kfree(buf_desc);
	return ERR_PTR(-EAGAIN);
}

/* map buf_desc on all usable links,
 * unused buffers stay mapped as long as the link is up
 */
static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
				     struct smc_buf_desc *buf_desc, bool is_rmb)
{
	int i, rc = 0, cnt = 0;

	/* protect against parallel link reconfiguration */
	down_read(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (!smc_link_usable(lnk))
			continue;
		if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
			rc = -ENOMEM;
			goto out;
		}
		cnt++;
	}
out:
	up_read(&lgr->llc_conf_mutex);
	if (!rc && !cnt)
		rc = -EINVAL;
	return rc;
}

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			if (rc == -ENOMEM)
				return ERR_PTR(-EAGAIN);
			if (rc == -ENOSPC)
				return ERR_PTR(-ENOSPC);
			return ERR_PTR(-EIO);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header is stored within the DMB, so report a smaller
		 * usable length
		 */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}

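/* Worked example of the length adjustment above (numbers illustrative):
 * for a 16 KiB DMB the connection sees
 *
 *	buf_desc->len = 16384 - sizeof(struct smcd_cdc_msg);
 *
 * because SMC-D stores the CDC header inside the DMB itself, unlike
 * SMC-R where CDC messages travel as separate work requests.
 */
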
static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_comp;
	struct rw_semaphore *lock;	/* lock buffer list */
	bool is_dgraded = false;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		bufsize = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		bufsize = smc->sk.sk_sndbuf / 2;

	for (bufsize_comp = smc_compress_bufsize(bufsize, is_smcd, is_rmb);
	     bufsize_comp >= 0; bufsize_comp--) {
		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_comp];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_comp];
		}
		bufsize = smc_uncompress_bufsize(bufsize_comp);

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list);
		if (buf_desc) {
			buf_desc->is_dma_need_sync = 0;
			SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
			SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc)) {
			if (!is_dgraded) {
				is_dgraded = true;
				SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb);
			}
			continue;
		}

		SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
		SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
		buf_desc->used = 1;
		down_write(lock);
		list_add(&buf_desc->list, buf_list);
		up_write(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return PTR_ERR(buf_desc);

	if (!is_smcd) {
		if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
			smcr_buf_unuse(buf_desc, is_rmb, lgr);
			return -ENOMEM;
		}
	}

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_comp = bufsize_comp;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}

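/* Worked example of the downgrade loop (assuming the usual RMBE size
 * encoding, where compressed index i stands for 2^(i + 14) bytes, i.e.
 * index 0 = 16 KiB): sk_rcvbuf = 262144 starts at bufsize = 131072,
 * index 3. An -EAGAIN from the allocator retries index 2 (64 KiB),
 * then 1 (32 KiB), then 0 (16 KiB); only -ENOMEM, i.e. even the small
 * descriptor allocation failed, aborts the search immediately.
 */
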
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	if (!conn->sndbuf_desc->is_dma_need_sync)
		return;
	if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd ||
	    !smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	int i;

	if (!conn->rmb_desc->is_dma_need_sync)
		return;
	if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
				       DMA_FROM_DEVICE);
	}
}

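/* Both sync helpers above are cheap no-ops in the common case:
 * is_dma_need_sync is only set when a link's device needs explicit
 * CPU/device cache synchronization (non-coherent DMA). A hedged
 * caller's sketch for the receive side:
 *
 *	smc_rmb_sync_sg_for_cpu(conn);	// make device writes visible
 *	memcpy(dst, conn->rmb_desc->cpu_addr + off, len);
 *
 * (illustrative only; the real copy sits in the RX path)
 */
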
/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc) {
		/* undo the send buffer created above */
		down_write(&smc->conn.lgr->sndbufs_lock);
		list_del(&smc->conn.sndbuf_desc->list);
		up_write(&smc->conn.lgr->sndbufs_lock);
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
		smc->conn.sndbuf_desc = NULL;
	}
	return rc;
}

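/* Usage sketch (hypothetical caller; the real call sites are in the
 * connect/listen paths once the link group is fixed):
 *
 *	rc = smc_buf_create(smc, ini->is_smcd);
 *	if (rc)
 *		return SMC_CLC_DECL_MEM;	// decline, fall back to TCP
 *
 * Note the asymmetric cleanup above: a failing RMB creation must also
 * unwind the already created send buffer, while a failing send buffer
 * leaves nothing behind.
 */
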
static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	/* test_and_set_bit guards against a concurrent reservation of the
	 * same slot between the bitmap scan and the claim
	 */
	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
				   u32 rkey)
{
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (test_bit(i, lgr->rtokens_used_mask) &&
		    lgr->rtokens[i][lnk_idx].rkey == rkey)
			return i;
	}
	return -ENOENT;
}

/* set rtoken for a new link to an existing rmb */
void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
{
	int rtok_idx;

	rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
	if (rtok_idx == -ENOENT)
		return;
	lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
	lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
}

/* set rtoken for a new link whose link_id is given */
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
		     __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	bool found = false;
	int link_idx;

	for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
		if (lgr->lnk[link_idx].link_id == link_id) {
			found = true;
			break;
		}
	}
	if (!found)
		return;
	lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
	lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][lnk->link_idx].rkey = rkey;
	lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
	return i;
}

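/* Layout sketch: lgr->rtokens is a 2-D table, rtokens[rmb_slot][link_idx],
 * holding the peer's (rkey, dma_addr) pair per local link. A hedged
 * consumer's view, as used when posting an RDMA write to the peer RMB:
 *
 *	rkey  = lgr->rtokens[conn->rtoken_idx][link->link_idx].rkey;
 *	raddr = lgr->rtokens[conn->rtoken_idx][link->link_idx].dma_addr;
 *
 * (illustrative; the real lookup sits in the WR send path)
 */
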
/* delete an rtoken from all links */
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u32 rkey = ntohl(nw_rkey);
	int i, j;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
				lgr->rtokens[i][j].rkey = 0;
				lgr->rtokens[i][j].dma_addr = 0;
			}
			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_link *lnk,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
					  clc->r0.rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}

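/* Handshake sketch (hedged; field names per the CLC accept/confirm
 * message used above): the peer advertises where its RMB lives,
 *
 *	clc->r0.rmb_dma_addr	// peer RMB address, __be64
 *	clc->r0.rmb_rkey	// RDMA rkey for that RMB, __be32
 *
 * and smc_rtoken_add() converts both to host byte order and files them
 * under a free rtoken slot, whose index the connection keeps.
 */
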
static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	mutex_lock(&smc_ib_devices.mutex);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	mutex_unlock(&smc_ib_devices.mutex);

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	mutex_unlock(&smcd_dev_list.mutex);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
	struct smcd_dev *smcd;

	smc_core_going_away();

	smc_smcr_terminate_all(NULL);

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		smc_smcd_terminate_all(smcd);
	mutex_unlock(&smcd_dev_list.mutex);
}

static int smc_core_reboot_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	smc_lgrs_shutdown();
	smc_ib_unregister_client();
	return 0;
}

static struct notifier_block smc_reboot_notifier = {
	.notifier_call = smc_core_reboot_event,
};

int __init smc_core_init(void)
{
	return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	unregister_reboot_notifier(&smc_reboot_notifier);
	smc_lgrs_shutdown();
}