// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Basic Transport Functions exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <linux/mutex.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)

static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

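/* Note: lgr_cnt and lgrs_deleted let smc_smcr_terminate_all() below wait
 * until the last SMC-R link group has been freed (see the wait_event()
 * calls at the end of that function).
 */
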
static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

static void smc_link_down_work(struct work_struct *work);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}
	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing)
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
				 SMC_LGR_FREE_DELAY_CLNT :
				 SMC_LGR_FREE_DELAY_SERV);
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @smc		connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

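/* Note: the lookup counterpart, smc_lgr_find_conn(), searches the same
 * rbtree keyed by alert_token_local; both insert and lookup run with
 * conns_lock held.
 */
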
/* assign an SMC-R link to the connection */
static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
{
	enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
				       SMC_LNK_ACTIVE;
	int i, j;

	/* do link balancing */
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &conn->lgr->lnk[i];

		if (lnk->state != expected || lnk->link_is_asym)
			continue;
		if (conn->lgr->role == SMC_CLNT) {
			conn->lnk = lnk; /* temporary, SMC server assigns link*/
			break;
		}
		if (conn->lgr->conns_num % 2) {
			for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
				struct smc_link *lnk2;

				lnk2 = &conn->lgr->lnk[j];
				if (lnk2->state == expected &&
				    !lnk2->link_is_asym) {
					conn->lnk = lnk2;
					break;
				}
			}
		}
		if (!conn->lnk)
			conn->lnk = lnk;
		break;
	}
	if (!conn->lnk)
		return SMC_CLC_DECL_NOACTLINK;
	return 0;
}

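/* Balancing note: with two symmetric links active, the conns_num parity
 * check above steers every other server-side connection to the second
 * matching link, so connections alternate between the links of the group.
 */
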
/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);
	int rc;

	if (!conn->lgr->is_smcd) {
		rc = smcr_lgr_conn_assign_link(conn, first);
		if (rc)
			return rc;
	}
	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
	return 0;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
	conn->lgr = NULL;
}

void smc_lgr_cleanup_early(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;
	spinlock_t *lgr_lock;

	if (!lgr)
		return;

	smc_conn_free(conn);
	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, true);
}

static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (smc_link_sendable(lnk))
			lnk->state = SMC_LNK_INACTIVE;
	}
	wake_up_all(&lgr->llc_msg_waiter);
	wake_up_all(&lgr->llc_flow_waiter);
}

static void smc_lgr_free(struct smc_link_group *lgr);

static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && !lgr->terminating)
		smc_llc_send_link_delete_all(lgr, true,
					     SMC_LLC_DEL_PROG_INIT_TERM);
	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	if (!lgr->is_smcd)
		smcr_lgr_link_deactivate_all(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	__smc_lgr_terminate(lgr, true);
}

/* return next unique link id for the lgr */
static u8 smcr_next_link_id(struct smc_link_group *lgr)
{
	u8 link_id;
	int i;

	while (1) {
		link_id = ++lgr->next_link_id;
		if (!link_id)	/* skip zero as link_id */
			link_id = ++lgr->next_link_id;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (smc_link_usable(&lgr->lnk[i]) &&
			    lgr->lnk[i].link_id == link_id)
				goto again;
		}
		break;
again:
		;
	}
	return link_id;
}

int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
		   u8 link_idx, struct smc_init_info *ini)
{
	u8 rndvec[3];
	int rc;

	get_device(&ini->ib_dev->ibdev->dev);
	atomic_inc(&ini->ib_dev->lnk_cnt);
	lnk->link_id = smcr_next_link_id(lgr);
	lnk->lgr = lgr;
	lnk->link_idx = link_idx;
	lnk->smcibdev = ini->ib_dev;
	lnk->ibport = ini->ib_port;
	lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
	smc_llc_link_set_uid(lnk);
	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
	if (!ini->ib_dev->initialized) {
		rc = smc_ib_setup_per_ibdev(ini->ib_dev);
		if (rc)
			goto out;
	}
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
		(rndvec[2] << 16);
	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
				  ini->vlan_id, lnk->gid, &lnk->sgid_index);
	if (rc)
		goto out;
	rc = smc_llc_link_init(lnk);
	if (rc)
		goto out;
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto clear_llc_lnk;
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	lnk->state = SMC_LNK_ACTIVATING;
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk, false);
out:
	put_device(&ini->ib_dev->ibdev->dev);
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&ini->ib_dev->lnk_cnt))
		wake_up(&ini->ib_dev->lnks_deleted);
	return rc;
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 link_idx;
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
				     ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
				     SMC_LGR_ID_SIZE, &lgr->id);
	if (!lgr->tx_wq) {
		rc = -ENOMEM;
		goto free_lgr;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	mutex_init(&lgr->sndbufs_lock);
	mutex_init(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	lgr->next_link_id = 0;
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		get_device(&ini->ism_dev[ini->ism_selected]->dev);
		lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected];
		lgr->smcd = ini->ism_dev[ini->ism_selected];
		lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->smc_version = ini->smcd_version;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
		       SMC_SYSTEMID_LEN);
		memcpy(lgr->pnet_id, ini->ib_dev->pnetid[ini->ib_port - 1],
		       SMC_MAX_PNETID_LEN);
		smc_llc_lgr_init(lgr, smc);

		link_idx = SMC_SINGLE_LINK;
		lnk = &lgr->lnk[link_idx];
		rc = smcr_link_init(lgr, lnk, link_idx, ini);
		if (rc)
			goto free_wq;
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		atomic_inc(&lgr_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add_tail(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

free_wq:
	destroy_workqueue(lgr->tx_wq);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}

static int smc_write_space(struct smc_connection *conn)
{
	int buffer_len = conn->peer_rmbe_size;
	union smc_host_cursor prod;
	union smc_host_cursor cons;
	int space;

	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
	/* determine rx_buf space */
	space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
	return space;
}

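/* Worked example (assuming smc_curs_diff() returns the byte distance from
 * @cons to @prod): with peer_rmbe_size 65536 and the producer 1000 bytes
 * ahead of the consumer, smc_write_space() returns 65536 - 1000 = 64536.
 */
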
static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
			     struct smc_wr_buf *wr_buf)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons, fin;
	int rc = 0;
	int diff;

	smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
	smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
	/* set prod cursor to old state, enforce tx_rdma_writes() */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
		/* cons cursor advanced more than fin, and prod was set
		 * equal to fin above, so now prod is smaller than cons.
		 * Fix that.
		 */
		diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_sent, diff);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_fin, diff);

		smp_mb__before_atomic();
		atomic_add(diff, &conn->sndbuf_space);
		smp_mb__after_atomic();

		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl.prod, diff);
		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl_fin, diff);
	}
	/* recalculate, value is used by tx_rdma_writes() */
	atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));

	if (smc->sk.sk_state != SMC_INIT &&
	    smc->sk.sk_state != SMC_CLOSED) {
		rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
		if (!rc) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
			smc->sk.sk_data_ready(&smc->sk);
		}
	} else {
		smc_wr_tx_put_slot(conn->lnk,
				   (struct smc_wr_tx_pend_priv *)pend);
	}
	return rc;
}

struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err)
{
	struct smc_link *to_lnk = NULL;
	struct smc_cdc_tx_pend *pend;
	struct smc_connection *conn;
	struct smc_wr_buf *wr_buf;
	struct smc_sock *smc;
	struct rb_node *node;
	int i, rc = 0;

	/* link is inactive, wake up tx waiters */
	smc_wr_wakeup_tx_wait(from_lnk);

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
			continue;
		if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
		    from_lnk->ibport == lgr->lnk[i].ibport) {
			continue;
		}
		to_lnk = &lgr->lnk[i];
		break;
	}
	if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
		smc_lgr_terminate_sched(lgr);
		return NULL;
	}
again:
	read_lock_bh(&lgr->conns_lock);
	for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
		conn = rb_entry(node, struct smc_connection, alert_node);
		if (conn->lnk != from_lnk)
			continue;
		smc = container_of(conn, struct smc_sock, conn);
		/* conn->lnk not yet set in SMC_INIT state */
		if (smc->sk.sk_state == SMC_INIT)
			continue;
		if (smc->sk.sk_state == SMC_CLOSED ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
		    smc->sk.sk_state == SMC_PROCESSABORT) {
			spin_lock_bh(&conn->send_lock);
			conn->lnk = to_lnk;
			spin_unlock_bh(&conn->send_lock);
			continue;
		}
		sock_hold(&smc->sk);
		read_unlock_bh(&lgr->conns_lock);
		/* pre-fetch buffer outside of send_lock, might sleep */
		rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
		if (rc)
			goto err_out;
		/* avoid race with smcr_tx_sndbuf_nonempty() */
		spin_lock_bh(&conn->send_lock);
		conn->lnk = to_lnk;
		rc = smc_switch_cursor(smc, pend, wr_buf);
		spin_unlock_bh(&conn->send_lock);
		sock_put(&smc->sk);
		if (rc)
			goto err_out;
		goto again;
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_wr_tx_link_put(to_lnk);
	return to_lnk;

err_out:
	smcr_link_down_cond_sched(to_lnk);
	smc_wr_tx_link_put(to_lnk);
	return NULL;
}

static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
			   struct smc_link_group *lgr)
{
	int rc;

	if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
		/* unregister rmb with peer */
		rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
		if (!rc) {
			/* protect against smc_llc_cli_rkey_exchange() */
			mutex_lock(&lgr->llc_conf_mutex);
			smc_llc_do_delete_rkey(lgr, rmb_desc);
			rmb_desc->is_conf_rkey = false;
			mutex_unlock(&lgr->llc_conf_mutex);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
		}
	}

	if (rmb_desc->is_reg_err) {
		/* buf registration failed, reuse not possible */
		mutex_lock(&lgr->rmbs_lock);
		list_del(&rmb_desc->list);
		mutex_unlock(&lgr->rmbs_lock);

		smc_buf_free(lgr, true, rmb_desc);
	} else {
		rmb_desc->used = 0;
	}
}

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc)
		conn->sndbuf_desc->used = 0;
	if (conn->rmb_desc && lgr->is_smcd)
		conn->rmb_desc->used = 0;
	else if (conn->rmb_desc)
		smcr_buf_unuse(conn->rmb_desc, lgr);
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_wait_pend_tx_wr(conn);
		if (current_work() != &conn->abort_work)
			cancel_work_sync(&conn->abort_work);
	}
	if (!list_empty(&lgr->list)) {
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
		smc_lgr_unregister_conn(conn);
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
}

/* unregister a link from a buf_desc */
static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
				struct smc_link *lnk)
{
	if (is_rmb)
		buf_desc->is_reg_mr[lnk->link_idx] = false;
	if (!buf_desc->is_map_ib[lnk->link_idx])
		return;
	if (is_rmb) {
		if (buf_desc->mr_rx[lnk->link_idx]) {
			smc_ib_put_memory_region(
					buf_desc->mr_rx[lnk->link_idx]);
			buf_desc->mr_rx[lnk->link_idx] = NULL;
		}
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
	} else {
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
	}
	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	buf_desc->is_map_ib[lnk->link_idx] = false;
}

/* unmap all buffers of lgr for a deleted link */
static void smcr_buf_unmap_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		mutex_lock(&lgr->rmbs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
			smcr_buf_unmap_link(buf_desc, true, lnk);
		mutex_unlock(&lgr->rmbs_lock);
		mutex_lock(&lgr->sndbufs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
					 list)
			smcr_buf_unmap_link(buf_desc, false, lnk);
		mutex_unlock(&lgr->sndbufs_lock);
	}
}

static void smcr_rtoken_clear_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		lgr->rtokens[i][lnk->link_idx].rkey = 0;
		lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
	}
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_clear(struct smc_link *lnk, bool log)
{
	struct smc_ib_device *smcibdev;

	if (!lnk->lgr || lnk->state == SMC_LNK_UNUSED)
		return;
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk, log);
	smcr_buf_unmap_lgr(lnk);
	smcr_rtoken_clear_link(lnk);
	smc_ib_modify_qp_error(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
}

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);

	if (buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	int i;

	if (!lgr->is_smcd) {
		mutex_lock(&lgr->llc_conf_mutex);
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state != SMC_LNK_UNUSED)
				smcr_link_clear(&lgr->lnk[i], false);
		}
		mutex_unlock(&lgr->llc_conf_mutex);
		smc_llc_lgr_clear(lgr);
	}

	smc_lgr_free_bufs(lgr);
	destroy_workqueue(lgr->tx_wq);
	if (lgr->is_smcd) {
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(&lgr->smcd->dev);
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
	} else {
		if (!atomic_dec_return(&lgr_cnt))
			wake_up(&lgrs_deleted);
	}
	kfree(lgr);
}

static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		struct smc_buf_desc *buf_desc;

		list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
			buf_desc->len += sizeof(struct smcd_cdc_msg);
			smc_ism_unregister_dmb(lgr->smcd, buf_desc);
		}
	}
}

static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
	conn->killed = 1;
	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
	} else {
		smc_cdc_wait_pend_tx_wr(conn);
	}
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}

static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
		smcd_unregister_all_dmbs(lgr);
	} else {
		u32 rsn = lgr->llc_termination_rsn;

		if (!rsn)
			rsn = SMC_LLC_DEL_PROG_INIT_TERM;
		smc_llc_send_link_delete_all(lgr, false, rsn);
		smcr_lgr_link_deactivate_all(lgr);
	}
}

/* terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	/* cancel free_work sync, will terminate when lgr->freeing is set */
	cancel_delayed_work_sync(&lgr->free_work);
	lgr->terminating = 1;

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn, soft);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_lgr_cleanup(lgr);
	smc_lgr_free(lgr);
}

/* unlink link group and schedule termination */
void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	list_del_init(&lgr->list);
	lgr->freeing = 1;
	spin_unlock_bh(lgr_lock);
	schedule_work(&lgr->terminate_work);
}

/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			if (peer_gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
			list_move(&lgr->list, &lgr_free_list);
			lgr->freeing = 1;
		}
	}
	spin_unlock_bh(&dev->lgr_lock);

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		schedule_work(&lgr->terminate_work);
	}
}

/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smcd->lgr_lock);
	list_splice_init(&smcd->lgr_list, &lgr_free_list);
	list_for_each_entry(lgr, &lgr_free_list, list)
		lgr->freeing = 1;
	spin_unlock_bh(&smcd->lgr_lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (atomic_read(&smcd->lgr_cnt))
		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}

/* Called when an SMCR device is removed or the smc module is unloaded.
 * If smcibdev is given, all SMCR link groups using this device are terminated.
 * If smcibdev is NULL, all SMCR link groups are terminated.
 */
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);
	int i;

	spin_lock_bh(&smc_lgr_list.lock);
	if (!smcibdev) {
		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
		list_for_each_entry(lgr, &lgr_free_list, list)
			lgr->freeing = 1;
	} else {
		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
				if (lgr->lnk[i].smcibdev == smcibdev)
					smcr_link_down_cond_sched(&lgr->lnk[i]);
			}
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
		__smc_lgr_terminate(lgr, false);
	}

	if (smcibdev) {
		if (atomic_read(&smcibdev->lnk_cnt))
			wait_event(smcibdev->lnks_deleted,
				   !atomic_read(&smcibdev->lnk_cnt));
	} else {
		if (atomic_read(&lgr_cnt))
			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
	}
}

/* set new lgr type and clear all asymmetric link tagging */
void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
{
	char *lgr_type = "";
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		if (smc_link_usable(&lgr->lnk[i]))
			lgr->lnk[i].link_is_asym = false;
	if (lgr->type == new_type)
		return;
	lgr->type = new_type;

	switch (lgr->type) {
	case SMC_LGR_NONE:
		lgr_type = "NONE";
		break;
	case SMC_LGR_SINGLE:
		lgr_type = "SINGLE";
		break;
	case SMC_LGR_SYMMETRIC:
		lgr_type = "SYMMETRIC";
		break;
	case SMC_LGR_ASYMMETRIC_PEER:
		lgr_type = "ASYMMETRIC_PEER";
		break;
	case SMC_LGR_ASYMMETRIC_LOCAL:
		lgr_type = "ASYMMETRIC_LOCAL";
		break;
	}
	pr_warn_ratelimited("smc: SMC-R lg %*phN state changed: "
			    "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
			    lgr_type, lgr->pnet_id);
}

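/* Rough sketch of the lgr types (assumed semantics): SINGLE - one link per
 * side, SYMMETRIC - two usable links on both sides, ASYMMETRIC_LOCAL/_PEER -
 * only one side still has two usable links.
 */
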
/* set new lgr type and tag a link as asymmetric */
void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
			    enum smc_lgr_type new_type, int asym_lnk_idx)
{
	smcr_lgr_set_type(lgr, new_type);
	lgr->lnk[asym_lnk_idx].link_is_asym = true;
}

/* abort connection, abort_work scheduled from tasklet context */
static void smc_conn_abort_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(work,
						   struct smc_connection,
						   abort_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	lock_sock(&smc->sk);
	smc_conn_kill(conn, true);
	release_sock(&smc->sk);
	sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
}

void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *n;

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
		struct smc_link *link;

		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN) ||
		    lgr->type == SMC_LGR_SYMMETRIC ||
		    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
			continue;

		/* trigger local add link processing */
		link = smc_llc_usable_link(lgr);
		if (link)
			smc_llc_add_link_local(link);
	}
	spin_unlock_bh(&smc_lgr_list.lock);
}

/* link is down - switch connections to alternate link,
 * must be called under lgr->llc_conf_mutex lock
 */
static void smcr_link_down(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_link *to_lnk;
	int del_link_id;

	if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
		return;

	to_lnk = smc_switch_conns(lgr, lnk, true);
	if (!to_lnk) { /* no backup link available */
		smcr_link_clear(lnk, true);
		return;
	}
	smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	del_link_id = lnk->link_id;

	if (lgr->role == SMC_SERV) {
		/* trigger local delete link processing */
		smc_llc_srv_delete_link_local(to_lnk, del_link_id);
	} else {
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* another llc task is ongoing */
			mutex_unlock(&lgr->llc_conf_mutex);
			wait_event_timeout(lgr->llc_flow_waiter,
					   (list_empty(&lgr->list) ||
					    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
					   SMC_LLC_WAIT_TIME);
			mutex_lock(&lgr->llc_conf_mutex);
		}
		if (!list_empty(&lgr->list)) {
			smc_llc_send_delete_link(to_lnk, del_link_id,
						 SMC_LLC_REQ, true,
						 SMC_LLC_DEL_LOST_PATH);
			smcr_link_clear(lnk, true);
		}
		wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
	}
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_down_cond(struct smc_link *lnk)
{
	if (smc_link_downing(&lnk->state))
		smcr_link_down(lnk);
}

/* will get the lgr->llc_conf_mutex lock */
void smcr_link_down_cond_sched(struct smc_link *lnk)
{
	if (smc_link_downing(&lnk->state))
		schedule_work(&lnk->link_down_wrk);
}

void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *n;
	int i;

	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN))
			continue; /* lgr is not affected */
		if (list_empty(&lgr->list))
			continue;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *lnk = &lgr->lnk[i];

			if (smc_link_usable(lnk) &&
			    lnk->smcibdev == smcibdev && lnk->ibport == ibport)
				smcr_link_down_cond_sched(lnk);
		}
	}
}

static void smc_link_down_work(struct work_struct *work)
{
	struct smc_link *link = container_of(work, struct smc_link,
					     link_down_wrk);
	struct smc_link_group *lgr = link->lgr;

	if (list_empty(&lgr->list))
		return;
	wake_up_all(&lgr->llc_msg_waiter);
	mutex_lock(&lgr->llc_conf_mutex);
	smcr_link_down(link);
	mutex_unlock(&lgr->llc_conf_mutex);
}

static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
				  struct netdev_nested_priv *priv)
{
	unsigned short *vlan_id = (unsigned short *)priv->data;

	if (is_vlan_dev(lower_dev)) {
		*vlan_id = vlan_dev_vlan_id(lower_dev);
		return 1;
	}

	return 0;
}

/* Determine vlan of internal TCP socket. */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct netdev_nested_priv priv;
	struct net_device *ndev;
	int rc = 0;

	ini->vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		ini->vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

	priv.data = (void *)&ini->vlan_id;
	rtnl_lock();
	netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
	rtnl_unlock();

out_rel:
	dst_release(dst);
out:
	return rc;
}

static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
			   enum smc_lgr_role role, u32 clcqpn)
{
	int i;

	if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) ||
	    lgr->role != role)
		return false;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
		    !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
		    !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac)))
			return true;
	}
	return false;
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev, u64 peer_gid)
{
	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct list_head *lgr_list;
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
	spinlock_t *lgr_lock;
	int rc = 0;

	lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
				  &smc_lgr_list.list;
	lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
				  &smc_lgr_list.lock;
	ini->first_contact_local = 1;
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	if (role == SMC_CLNT && ini->first_contact_peer)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(lgr_lock);
	list_for_each_entry(lgr, lgr_list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
				    ini->ism_peer_gid[ini->ism_selected]) :
		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
		    !lgr->sync_err &&
		    (ini->smcd_version == SMC_V2 ||
		     lgr->vlan_id == ini->vlan_id) &&
		    (role == SMC_CLNT || ini->is_smcd ||
		     (lgr->conns_num < SMC_RMBS_PER_LGR_MAX &&
		      !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) {
			/* link group found */
			ini->first_contact_local = 0;
			conn->lgr = lgr;
			rc = smc_lgr_register_conn(conn, false);
			write_unlock_bh(&lgr->conns_lock);
			if (!rc && delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(lgr_lock);
	if (rc)
		return rc;

	if (role == SMC_CLNT && !ini->first_contact_peer &&
	    ini->first_contact_local) {
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
		return SMC_CLC_DECL_SYNCERR;
	}

create:
	if (ini->first_contact_local) {
		rc = smc_lgr_create(smc, ini);
		if (rc)
			goto out;
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
		rc = smc_lgr_register_conn(conn, true);
		write_unlock_bh(&lgr->conns_lock);
		if (rc)
			goto out;
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
	init_waitqueue_head(&conn->cdc_pend_tx_wq);
	INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
	if (ini->is_smcd) {
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	} else {
		conn->rx_off = 0;
	}
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc;
}

/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		return 0;

	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

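/* Worked example: size 16384 (SMC_BUF_MIN_SIZE) yields compressed 0; size
 * 17000 yields (17000 - 1) >> 14 = 1, ilog2(1) + 1 = 1, i.e. the 32KB class,
 * the next power of 2 above the request.
 */
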
/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}

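/* Example: compressed 0 -> 1 << 14 = 16KB, compressed 6 -> 1 << 20 = 1MB,
 * matching the SMCD_DMBE_SIZES comment further below.
 */
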
/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     struct mutex *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	mutex_lock(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			mutex_unlock(lock);
			return buf_slot;
		}
	}
	mutex_unlock(lock);
	return NULL;
}

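/* Note: the cmpxchg() claims a slot atomically because "used" is cleared
 * without this list lock held (see smc_buf_unuse() above), so a plain test
 * would race with a concurrent release.
 */
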
/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

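/* Example: for a 64KB RMB this yields max(6553, SOCK_MIN_SNDBUF / 2), i.e.
 * roughly 10% of the receive buffer space as required by RFC 7609.
 */
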
/* map an rmb buf to a link */
static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
			     struct smc_link *lnk)
{
	int rc;

	if (buf_desc->is_map_ib[lnk->link_idx])
		return 0;

	rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], 1, GFP_KERNEL);
	if (rc)
		return rc;
	sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
		   buf_desc->cpu_addr, buf_desc->len);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1) {
		rc = -EAGAIN;
		goto free_table;
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc, lnk->link_idx);
		if (rc)
			goto buf_unmap;
		smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
	}
	buf_desc->is_map_ib[lnk->link_idx] = true;
	return 0;

buf_unmap:
	smc_ib_buf_unmap_sg(lnk, buf_desc,
			    is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
free_table:
	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	return rc;
}

/* register a new rmb on IB device,
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
{
	if (list_empty(&link->lgr->list))
		return -ENOLINK;
	if (!rmb_desc->is_reg_mr[link->link_idx]) {
		/* register memory region for new rmb */
		if (smc_wr_reg_send(link, rmb_desc->mr_rx[link->link_idx])) {
			rmb_desc->is_reg_err = true;
			return -EFAULT;
		}
		rmb_desc->is_reg_mr[link->link_idx] = true;
	}
	return 0;
}

static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
			     struct list_head *lst, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf;
	int rc = 0;

	mutex_lock(lock);
	list_for_each_entry_safe(buf_desc, bf, lst, list) {
		if (!buf_desc->used)
			continue;
		rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
		if (rc)
			goto out;
	}
out:
	mutex_unlock(lock);
	return rc;
}

/* map all used buffers of lgr for a new link */
int smcr_buf_map_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i, rc = 0;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
				       &lgr->rmbs[i], true);
		if (rc)
			return rc;
		rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
				       &lgr->sndbufs[i], false);
		if (rc)
			return rc;
	}
	return 0;
}

/* register all used buffers of lgr for a new link,
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_buf_reg_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i, rc = 0;

	mutex_lock(&lgr->rmbs_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
			if (!buf_desc->used)
				continue;
			rc = smcr_link_reg_rmb(lnk, buf_desc);
			if (rc)
				goto out;
		}
	}
out:
	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}

static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
	buf_desc->len = bufsize;
	return buf_desc;
}

/* map buf_desc on all usable links,
 * unused buffers stay mapped as long as the link is up
 */
static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
				     struct smc_buf_desc *buf_desc, bool is_rmb)
{
	int i, rc = 0, cnt = 0;

	/* protect against parallel link reconfiguration */
	mutex_lock(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (!smc_link_usable(lnk))
			continue;
		if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
			rc = -ENOMEM;
			goto out;
		}
		cnt++;
	}
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	if (!rc && !cnt)
		rc = -EINVAL;
	return rc;
}

#define SMCD_DMBE_SIZES		6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
		return ERR_PTR(-EAGAIN);

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			if (rc == -ENOMEM)
				return ERR_PTR(-EAGAIN);
			if (rc == -ENOSPC)
				return ERR_PTR(-ENOSPC);
			return ERR_PTR(-EIO);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}

static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	struct mutex *lock;	/* lock buffer list */
	int sk_buf_size;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {
		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		mutex_lock(lock);
		list_add(&buf_desc->list, buf_list);
		mutex_unlock(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return PTR_ERR(buf_desc);

	if (!is_smcd) {
		if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
			smcr_buf_unuse(buf_desc, lgr);
			return -ENOMEM;
		}
	}

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}

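/* Note: the allocation loop above starts at the compressed class of the
 * requested size and falls back to the next smaller class when a buffer
 * allocation fails with -EAGAIN; it gives up only on -ENOMEM. Under memory
 * pressure a connection may therefore get a smaller buffer than requested.
 */
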
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
				       DMA_FROM_DEVICE);
	}
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
					  DMA_FROM_DEVICE);
	}
}

/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc) {
		mutex_lock(&smc->conn.lgr->sndbufs_lock);
		list_del(&smc->conn.sndbuf_desc->list);
		mutex_unlock(&smc->conn.lgr->sndbufs_lock);
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
		smc->conn.sndbuf_desc = NULL;
	}
	return rc;
}

static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
				   u32 rkey)
{
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (test_bit(i, lgr->rtokens_used_mask) &&
		    lgr->rtokens[i][lnk_idx].rkey == rkey)
			return i;
	}
	return -ENOENT;
}

/* set rtoken for a new link to an existing rmb */
void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
{
	int rtok_idx;

	rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
	if (rtok_idx == -ENOENT)
		return;
	lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
	lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
}

/* set rtoken for a new link whose link_id is given */
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
		     __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	bool found = false;
	int link_idx;

	for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
		if (lgr->lnk[link_idx].link_id == link_id) {
			found = true;
			break;
		}
	}
	if (!found)
		return;
	lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
	lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][lnk->link_idx].rkey = rkey;
	lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
	return i;
}

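/* Note: rtokens is a matrix indexed by rtoken slot and link index; each peer
 * RMB keeps one {rkey, dma_addr} pair per link so that connections can move
 * between links (see smc_rtoken_set() above and smc_switch_conns()).
 */
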
/* delete an rtoken from all links */
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u32 rkey = ntohl(nw_rkey);
	int i, j;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
				lgr->rtokens[i][j].rkey = 0;
				lgr->rtokens[i][j].dma_addr = 0;
			}
			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_link *lnk,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
					  clc->r0.rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}

static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	mutex_lock(&smc_ib_devices.mutex);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	mutex_unlock(&smc_ib_devices.mutex);

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	mutex_unlock(&smcd_dev_list.mutex);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
	struct smcd_dev *smcd;

	smc_core_going_away();

	smc_smcr_terminate_all(NULL);

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		smc_smcd_terminate_all(smcd);
	mutex_unlock(&smcd_dev_list.mutex);
}

static int smc_core_reboot_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	smc_lgrs_shutdown();
	smc_ib_unregister_client();
	return 0;
}

static struct notifier_block smc_reboot_notifier = {
	.notifier_call = smc_core_reboot_event,
};

int __init smc_core_init(void)
{
	return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	unregister_reboot_notifier(&smc_reboot_notifier);
	smc_lgrs_shutdown();
}