/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * A do-nothing notification handler, substituted for a kernel service's
 * notifier when a preallocated call is discarded so that no further
 * notifications reach the now-uninterested service.
 */
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max = tmp + 1;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use.  Note that we must
		 * compare against the tree node's ID, not the new call's ID,
		 * which hasn't been assigned yet.
		 */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock(&rxrpc_call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

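/* The three backlog rings above follow the one-producer/one-consumer pattern
 * described in Documentation/circular-buffers.txt: the producer fills a slot
 * and then publishes it with smp_store_release() on the head index, and the
 * consumer pairs that with smp_load_acquire() on the head before reading the
 * slot.  A minimal sketch of the discipline, using illustrative names (ring,
 * ring_head, ring_tail and RING_SIZE are not rxrpc symbols):
 *
 *	// producer
 *	ring[head] = item;			// fill the slot first...
 *	smp_store_release(&ring_head,		// ...then make it visible
 *			  (head + 1) & (RING_SIZE - 1));
 *
 *	// consumer
 *	head = smp_load_acquire(&ring_head);
 *	if (CIRC_CNT(head, tail, RING_SIZE) > 0) {
 *		item = ring[tail];		// read the slot first...
 *		smp_store_release(&ring_tail,	// ...then recycle it
 *				  (tail + 1) & (RING_SIZE - 1));
 *	}
 */
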
/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	/* Kernel services charge their own backlog, with preassigned user
	 * call IDs, via rxrpc_kernel_charge_accept(), so don't top it up
	 * here.
	 */
	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
		;

	return 0;
}

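/* For orientation: a userspace service reaches the preallocation above via
 * bind() and listen() on an AF_RXRPC socket, as described in
 * Documentation/networking/rxrpc.txt.  A minimal sketch (the service ID and
 * port are illustrative and error handling is elided):
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family		  = AF_RXRPC,
 *		.srx_service		  = 52,
 *		.transport_type		  = SOCK_DGRAM,
 *		.transport_len		  = sizeof(struct sockaddr_in),
 *		.transport.sin.sin_family = AF_INET,
 *		.transport.sin.sin_port	  = htons(7001),
 *	};
 *	int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 *	bind(fd, (struct sockaddr *)&srx, sizeof(srx));
 *	listen(fd, 10);	// sk_max_ack_backlog bounds the prealloc rings
 */
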
/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxrpc_connection_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxrpc_connection_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			/* The kernel service has now discarded its interest,
			 * so don't notify it again and drop its ref.
			 */
			call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	return call;
}

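/* Worked example of the spare-count invariant checked above: CIRC_CNT() is
 * (head - tail) & (size - 1), so for a ring of size 32 with head == 3 and
 * tail == 30 there are (3 - 30) & 31 == 5 spare slots.  An incoming call
 * always consumes a call slot, but only consumes a conn or a peer slot when
 * no suitable one already exists, so the spare counts must always satisfy
 * peers >= conns >= calls, which is what the two ASSERTCMP()s verify.
 */
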
/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && service_id == rx->srx.srx_service)
		goto found_service;

	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

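/* For reference: userspace reaches rxrpc_accept_call() by doing a sendmsg()
 * that carries an RXRPC_USER_CALL_ID control message (bearing the ID to
 * assign) followed by an RXRPC_ACCEPT control message, per
 * Documentation/networking/rxrpc.txt.  A minimal sketch (error handling
 * elided, the ID value is illustrative):
 *
 *	unsigned char control[CMSG_SPACE(sizeof(unsigned long)) +
 *			      CMSG_SPACE(0)];
 *	struct msghdr msg = { .msg_control = control,
 *			      .msg_controllen = sizeof(control) };
 *	unsigned long id = 1;
 *	struct cmsghdr *cmsg;
 *
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(id));
 *	memcpy(CMSG_DATA(cmsg), &id, sizeof(id));
 *
 *	cmsg = CMSG_NXTHDR(&msg, cmsg);
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_ACCEPT;
 *	cmsg->cmsg_len	 = CMSG_LEN(0);
 *
 *	sendmsg(fd, &msg, 0);
 */
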
/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);

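/* Example: a kernel service (AFS, for instance) keeps the backlog charged by
 * calling rxrpc_kernel_charge_accept() at startup and again from its
 * rxrpc_notify_new_call_t callback.  A minimal sketch under those
 * assumptions; the example_* names are hypothetical, not part of the API,
 * and example_notify_rx is presumed defined elsewhere:
 *
 *	static void example_attach(struct rxrpc_call *call, unsigned long id)
 *	{
 *		// associate the service's own call record with 'id'
 *	}
 *
 *	static void example_charge(struct socket *sock)
 *	{
 *		unsigned long id;
 *
 *		for (id = 1; id <= RXRPC_BACKLOG_MAX; id++)
 *			if (rxrpc_kernel_charge_accept(sock,
 *						       example_notify_rx,
 *						       example_attach,
 *						       id, GFP_KERNEL) < 0)
 *				break;
 *	}
 */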