// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_ism.h"
#include "smc_tx.h"
#define SMC_TX_WORK_DELAY	HZ
#define SMC_TX_CORK_DELAY	(HZ >> 2)	/* 250 ms */
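/* Note: both delays gate deferred scheduling of conn->tx_work:
 * SMC_TX_CORK_DELAY batches writes for corked sockets (see
 * smc_tx_sendmsg()), SMC_TX_WORK_DELAY is the retry interval when no CDC
 * send slot is available (see smcr_tx_sndbuf_nonempty()).
 */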
/***************************** sndbuf producer *******************************/
/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}
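/* smc_tx_write_space() is installed as the socket's sk_write_space
 * callback in smc_tx_init() at the bottom of this file.
 */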
/* Wakeup sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}
/* blocks sndbuf producer until at least one byte of free space available
 * or urgent Byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			/* ensure EPOLLOUT is subsequently generated */
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
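/* Note: smc_tx_wait() cooperates with smc_tx_write_space() through the
 * SOCK_NOSPACE bit: the waiter sets it before sleeping, and the wakeup
 * side only calls sk_write_space() while it is set (cf.
 * smc_tx_sndbuf_nonfull() above).
 */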
static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}
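/* Note on the copy scheme used by smc_tx_sendmsg(): the sndbuf is a ring
 * buffer, so one send may need up to two memcpy chunks - the tail of the
 * ring and, after wrapping, its head. The chunk loop below therefore runs
 * at most twice per iteration of the outer while loop.
 */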
/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			if (send_done)
				return send_done;
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc)
				goto out_err;
			continue;
		}
		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
		    (atomic_read(&conn->sndbuf_space) >
					(conn->sndbuf_desc->len >> 1)))
			/* for a corked socket defer the RDMA writes if there
			 * is still sufficient sndbuf_space available
			 */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_CORK_DELAY);
		else
			smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;
out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}
/***************************** sndbuf consumer *******************************/
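/* Note: the consumer side moves produced data from the local sndbuf into
 * the peer's RMBE - via ib_post_send() RDMA writes for SMC-R, or
 * smc_ism_write() for SMC-D - and then sends a CDC message so the peer
 * learns about the advanced producer cursor.
 */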
/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
		      u32 offset, int signal)
{
	struct smc_ism_position pos;
	int rc;

	memset(&pos, 0, sizeof(pos));
	pos.token = conn->peer_token;
	pos.index = conn->peer_rmbe_idx;
	pos.offset = conn->tx_off + offset;
	pos.signal = signal;
	rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}
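/* Note: for SMC-D there is no RDMA transport; smc_ism_write() copies
 * directly into the peer's memory region identified by token/index, with
 * offset taken relative to this connection's tx_off.
 */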
/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_sge sges[])
{
	struct smc_link_group *lgr = conn->lgr;
	struct ib_rdma_wr rdma_wr;
	struct smc_link *link;
	int rc;

	memset(&rdma_wr, 0, sizeof(rdma_wr));
	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr.wr.sg_list = sges;
	rdma_wr.wr.num_sge = num_sges;
	rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
	rdma_wr.remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL);
	if (rc) {
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
		smc_lgr_terminate(lgr);
	}
	return rc;
}
/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}
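/* Note on the helpers below: both source (sndbuf) and destination (RMBE)
 * are ring buffers that can wrap at most once per transfer, so the nested
 * loops handle at most 2 destination chunks of at most 2 source chunks
 * each.
 */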
/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	dma_addr_t dma_addr =
		sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
	int src_len_sum = src_len, dst_len_sum = dst_len;
	struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
	int sent_count = src_off;
	int srcchunk, dstchunk;
	int num_sges;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sges[srcchunk].addr = dma_addr + src_off;
			sges[srcchunk].length = src_len;
			sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
			num_sges++;

			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
				sent_count);
		src_len_sum = src_len;
	}
	return 0;
}
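/* Note: the SMC-D variant below mirrors the chunk logic of its SMC-R
 * counterpart, but copies via smcd_tx_ism_write() and skips the area at
 * the start of the peer buffer that is reserved for the CDC message
 * (sizeof(struct smcd_cdc_msg)).
 */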
/* SMC-D helper for smc_tx_rdma_writes() */
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int srcchunk, dstchunk;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			void *data = conn->sndbuf_desc->cpu_addr + src_off;

			rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
					       sizeof(struct smcd_cdc_msg), 0);
			if (rc)
				return rc;
			dst_off += src_len;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
		src_len_sum = src_len;
	}
	return 0;
}
/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
	size_t len, src_len, dst_off, dst_len; /* current chunk values */
	union smc_host_cursor sent, prep, prod, cons;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	int rc;

	/* source: sndbuf */
	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}

	if (conn->lgr->is_smcd)
		rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	else
		rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	if (rc)
		return rc;

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
							/* dst: peer RMBE */
	smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */

	return 0;
}
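/* Note: if no CDC send slot is free, smcr_tx_sndbuf_nonempty() below does
 * not fail the transfer; for a healthy connection it re-arms conn->tx_work
 * so the send is retried after SMC_TX_WORK_DELAY.
 */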
/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED)
				return sock_error(&smc->sk);
			rc = 0;
			if (conn->alert_token_local) /* connection healthy */
				mod_delayed_work(system_wq, &conn->tx_work,
						 SMC_TX_WORK_DELAY);
		}
		return rc;
	}

	spin_lock_bh(&conn->send_lock);
	if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
		rc = smc_tx_rdma_writes(conn);
		if (rc) {
			smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	pflags = &conn->local_tx_ctrl.prod_flags;
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}
static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	int rc = 0;

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present)
		rc = smc_tx_rdma_writes(conn);
	if (!rc)
		rc = smcd_cdc_msg_send(conn);

	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}
	spin_unlock_bh(&conn->send_lock);
	return rc;
}
int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	int rc;

	if (conn->lgr->is_smcd)
		rc = smcd_tx_sndbuf_nonempty(conn);
	else
		rc = smcr_tx_sndbuf_nonempty(conn);

	return rc;
}
/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	if (smc->sk.sk_err ||
	    !conn->alert_token_local ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
		goto out;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;

out:
	release_sock(&smc->sk);
}
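/* Note: smc_tx_consumer_update() below feeds the peer's flow control: it
 * sends a consumer cursor update once enough receive buffer space has been
 * freed (more than rmbe_update_limit) or when the peer announced
 * write_blocked; a failed CDC send is retried via conn->tx_work.
 */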
void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff_large(conn->rmb_desc->len,
						  &cfed, &prod);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    conn->alert_token_local) { /* connection healthy */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
		smc_curs_copy(&conn->rx_curs_confirmed,
			      &conn->local_tx_ctrl.cons, conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}
/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}