// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Copy user space data into send buffer, if send buffer space available.
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"

#define SMC_TX_WORK_DELAY	HZ

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait_memory().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   POLLOUT | POLLWRNORM |
						   POLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}

/* Wakeup sndbuf producers that blocked with smc_tx_wait_memory().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space available */
static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			/* ensure EPOLLOUT is subsequently generated */
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space))
			break; /* at least 1 byte of free space available */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close_or_senddone(conn) ||
			      atomic_read(&conn->sndbuf_space),
			      &wait);
		sk->sk_write_pending--;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (!atomic_read(&conn->sndbuf_space)) {
			rc = smc_tx_wait_memory(smc, msg->msg_flags);
			if (rc) {
				if (send_done)
					return send_done;
				goto out_err;
			}
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait_memory above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_write(&prep,
			       smc_curs_read(&conn->tx_curs_prep, conn),
			       conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t,
				  copylen, conn->sndbuf_size - tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_size, &prep, copylen);
		smc_curs_write(&conn->tx_curs_prep,
			       smc_curs_read(&prep, conn),
			       conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_size */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}
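
/* Illustrative sketch (not part of the original file): the ring-buffer split
 * performed by the copy loop in smc_tx_sendmsg() above, for a send buffer of
 * bufsize bytes with the producer cursor at cnt and copylen bytes to store.
 * E.g. bufsize = 16384, cnt = 15360, copylen = 3072 yields a first chunk of
 * 1024 bytes at offset 15360 and a second chunk of 2048 bytes at offset 0.
 */
static inline void smc_tx_example_chunk_split(size_t bufsize, size_t cnt,
					      size_t copylen, size_t *first,
					      size_t *second)
{
	*first = min_t(size_t, copylen, bufsize - cnt); /* up to buffer end */
	*second = copylen - *first;	/* wrapped remainder from offset 0 */
}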

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_sge sges[])
{
	struct smc_link_group *lgr = conn->lgr;
	struct ib_send_wr *failed_wr = NULL;
	struct ib_rdma_wr rdma_wr;
	struct smc_link *link;
	int rc;

	memset(&rdma_wr, 0, sizeof(rdma_wr));
	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr.wr.sg_list = sges;
	rdma_wr.wr.num_sge = num_sges;
	rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
	rdma_wr.remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		((conn->peer_conn_idx - 1) * conn->peer_rmbe_size) +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}
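
/* Illustrative sketch (not part of the original file): how rdma_wr.remote_addr
 * above is composed. With hypothetical values rmb_dma_addr = 0x100000,
 * peer_conn_idx = 3, peer_rmbe_size = 65536 and peer_rmbe_offset = 4096 the
 * RDMA write targets 0x100000 + 2 * 65536 + 4096 = 0x121000.
 */
static inline u64 smc_tx_example_remote_addr(u64 rmb_dma_addr, u8 peer_conn_idx,
					     int peer_rmbe_size,
					     int peer_rmbe_offset)
{
	/* skip the RMBEs of peer connections with lower index within the RMB,
	 * then add the offset within this connection's RMBE
	 */
	return rmb_dma_addr + (peer_conn_idx - 1) * peer_rmbe_size +
	       peer_rmbe_offset;
}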

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_size, sent, len);
}
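
/* Illustrative sketch (not part of the original file): the modulo cursor
 * advance that smc_curs_add() (defined elsewhere) performs on a wrap/count
 * cursor pair, shown here only to clarify the two calls above.
 */
static inline void smc_tx_example_curs_add(int size, int *wrap, int *count,
					   int value)
{
	*count += value;
	if (*count >= size) {
		(*wrap)++;		/* ring wrapped one more lap */
		*count -= size;		/* offset within the new lap */
	}
}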

/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
	size_t src_off, src_len, dst_off, dst_len; /* current chunk values */
	size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk;
	union smc_host_cursor sent, prep, prod, cons;
	struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
	struct smc_link_group *lgr = conn->lgr;
	int to_send, rmbespace;
	struct smc_link *link;
	dma_addr_t dma_addr;
	int num_sges;
	int rc;

	/* source: sndbuf */
	smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
	smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_size, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_write(&prod,
		       smc_curs_read(&conn->local_tx_ctrl.prod, conn),
		       conn);
	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_rx_ctrl.cons, conn),
		       conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	conn->local_tx_ctrl.prod_flags.write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	link = &lgr->lnk[SMC_SINGLE_LINK];
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	dst_len_sum = dst_len;
	src_off = sent.count;
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_size) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_size - sent.count;
	}
	src_len_sum = src_len;
	dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sges[srcchunk].addr = dma_addr + src_off;
			sges[srcchunk].length = src_len;
			sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
			num_sges++;
			src_off += src_len;
			if (src_off >= conn->sndbuf_size)
				src_off -= conn->sndbuf_size;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int,
				dst_len, conn->sndbuf_size - sent.count);
		src_len_sum = src_len;
	}

	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_write(&conn->local_tx_ctrl.prod,
		       smc_curs_read(&prod, conn),
		       conn);
						/* dst: peer RMBE */
	smc_curs_write(&conn->tx_curs_sent,
		       smc_curs_read(&sent, conn),
		       conn);
						/* src: local sndbuf */

	return 0;
}
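
/* Worked example (not from the original source, hypothetical sizes) for the
 * nested chunking in smc_tx_rdma_writes() above: with sndbuf_size = 16384,
 * peer_rmbe_size = 32768, sent.count = 15360, prod.count = 30720 and
 * len = 3072 (prod.wrap == cons.wrap, i.e. unwrapped destination fill), the
 * destination splits into 2048 bytes at RMBE offset 30720 plus 1024 bytes at
 * offset 0. The source for the first destination chunk wraps as well and
 * needs two sges (1024 bytes at sndbuf offset 15360 and 1024 bytes at offset
 * 0); the second destination chunk needs a single sge of 1024 bytes at
 * sndbuf offset 1024. At most two RDMA writes with up to two sges each are
 * posted per call.
 */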

/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	spin_lock_bh(&conn->send_lock);
	rc = smc_cdc_get_free_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], &wr_buf,
				   &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED) {
				rc = sock_error(&smc->sk);
				goto out_unlock;
			}
			rc = 0; /* queue a retry from process context */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
		}
		goto out_unlock;
	}
	rc = smc_tx_rdma_writes(conn);
	if (rc) {
		smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
				   (struct smc_wr_tx_pend_priv *)pend);
		goto out_unlock;
	}
	rc = smc_cdc_msg_send(conn, wr_buf, pend);
out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit
 */
static void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
	release_sock(&smc->sk);
}

void smc_tx_consumer_update(struct smc_connection *conn)
{
	union smc_host_cursor cfed, cons;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int to_confirm;
	int rc;

	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
		       conn);
	smc_curs_write(&cfed,
		       smc_curs_read(&conn->rx_curs_confirmed, conn),
		       conn);
	to_confirm = smc_curs_diff(conn->rmbe_size, &cfed, &cons);

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((to_confirm > (conn->rmbe_size / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		rc = smc_cdc_get_free_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
					   &wr_buf, &pend);
		if (!rc)
			rc = smc_cdc_msg_send(conn, wr_buf, pend);
		if (rc < 0) {
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
		smc_curs_write(&conn->rx_curs_confirmed,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
			       conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}
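
/* Illustrative sketch (not part of the original file): the condition above,
 * restated as a standalone predicate. A consumer cursor update is sent when
 * the peer explicitly requested one, or when more than rmbe_update_limit
 * bytes await confirmation and either more than half of the RMBE has been
 * consumed or the peer is known to be write-blocked.
 */
static inline bool smc_tx_example_must_send_cons_update(int to_confirm,
							int update_limit,
							int rmbe_size,
							bool upd_requested,
							bool peer_blocked)
{
	return upd_requested ||
	       (to_confirm > update_limit &&
		(to_confirm > rmbe_size / 2 || peer_blocked));
}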

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	spin_lock_init(&smc->conn.send_lock);
}