/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/slab.h>
#include "ar-internal.h"

/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
                                                     struct sk_buff *skb)
{
        struct rxrpc_connection *conn = NULL;
        struct rxrpc_conn_proto k;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rb_node *p;
        unsigned int seq = 0;

        k.epoch = sp->hdr.epoch;
        k.cid   = sp->hdr.cid & RXRPC_CIDMASK;

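        /* Note: k.epoch and k.cid share storage with k.index_key, a single
         * u64, via the union in struct rxrpc_conn_proto (ar-internal.h), so
         * the tree walk below can order nodes with one 64-bit comparison.
         */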
        do {
                /* Unfortunately, rbtree walking doesn't give reliable results
                 * under just the RCU read lock, so we have to check for
                 * changes.
                 */
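                /* read_seqbegin_or_lock() starts a lockless read section when
                 * seq is even (as it is on entry) and would take the seqlock
                 * exclusively if seq were odd; need_seqretry() requests
                 * another pass if a writer interleaved with the walk.
                 */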
                read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

                p = rcu_dereference_raw(peer->service_conns.rb_node);
                while (p) {
                        conn = rb_entry(p, struct rxrpc_connection, service_node);

                        if (conn->proto.index_key < k.index_key)
                                p = rcu_dereference_raw(p->rb_left);
                        else if (conn->proto.index_key > k.index_key)
                                p = rcu_dereference_raw(p->rb_right);
                        else
                                break;
                        conn = NULL;
                }
        } while (need_seqretry(&peer->service_conn_lock, seq));

        done_seqretry(&peer->service_conn_lock, seq);
        _leave(" = %d", conn ? conn->debug_id : -1);
        return conn;
}
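
/* Illustrative caller pattern (a sketch, not taken from this file; the real
 * lookup path is rxrpc_find_connection_rcu() in conn_object.c).  The search
 * must run inside an RCU read-side section and the result carries no
 * reference, so a caller wanting to keep the connection must pin it before
 * unlocking:
 *
 *      rcu_read_lock();
 *      conn = rxrpc_find_service_conn_rcu(peer, skb);
 *      conn = rxrpc_get_connection_maybe(conn);
 *      rcu_read_unlock();
 */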

/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
                                       struct rxrpc_connection *conn)
{
        struct rxrpc_connection *cursor = NULL;
        struct rxrpc_conn_proto k = conn->proto;
        struct rb_node **pp, *parent;

        write_seqlock_bh(&peer->service_conn_lock);

        pp = &peer->service_conns.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                cursor = rb_entry(parent,
                                  struct rxrpc_connection, service_node);

                if (cursor->proto.index_key < k.index_key)
                        pp = &(*pp)->rb_left;
                else if (cursor->proto.index_key > k.index_key)
                        pp = &(*pp)->rb_right;
                else
                        goto found_extant_conn;
        }

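        /* rb_link_node_rcu() publishes the new node with rcu_assign_pointer(),
         * so lockless readers in rxrpc_find_service_conn_rcu() cannot observe
         * it before its fields are fully initialised.
         */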
        rb_link_node_rcu(&conn->service_node, parent, pp);
        rb_insert_color(&conn->service_node, &peer->service_conns);
conn_published:
        set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
        write_sequnlock_bh(&peer->service_conn_lock);
        _leave(" = %d [new]", conn->debug_id);
        return;

found_extant_conn:
        if (atomic_read(&cursor->usage) == 0)
                goto replace_old_connection;
        write_sequnlock_bh(&peer->service_conn_lock);
        /* We should not be able to get here.  rxrpc_incoming_connection() is
         * called in a non-reentrant context, so there can't be a race to
         * insert a new connection.
         */
        BUG();

replace_old_connection:
        /* The old connection is from an outdated epoch. */
        _debug("replace conn");
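        /* rb_replace_node_rcu() swaps the nodes in place, without rebalancing
         * and with the same rcu_assign_pointer() publication guarantee, so a
         * concurrent lockless walker sees either the old node or the new one,
         * never a half-linked tree.
         */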
        rb_replace_node_rcu(&cursor->service_node,
                            &conn->service_node,
                            &peer->service_conns);
        clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
        goto conn_published;
}

/*
 * Preallocate a service connection.  The connection is placed on the proc and
 * reap lists so that we don't have to get the lock from BH context.
 */
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
                                                           gfp_t gfp)
{
        struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);

        if (conn) {
                /* We maintain an extra ref on the connection whilst it is on
                 * the rxrpc_connections list.
                 */
                conn->state = RXRPC_CONN_SERVICE_PREALLOC;
                atomic_set(&conn->usage, 2);
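                /* Of the two refs, one belongs to the rxnet->service_conns
                 * list (dropped when the connection is reaped) and one is
                 * handed back to the caller, the service backlog
                 * preallocator in call_accept.c.
                 */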

                write_lock(&rxnet->conn_lock);
                list_add_tail(&conn->link, &rxnet->service_conns);
                list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
                write_unlock(&rxnet->conn_lock);

                trace_rxrpc_conn(conn, rxrpc_conn_new_service,
                                 atomic_read(&conn->usage),
                                 __builtin_return_address(0));
        }

        return conn;
}

/*
 * Set up an incoming connection.  This is called in BH context with the RCU
 * read lock held.
 */
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
                                   struct rxrpc_connection *conn,
                                   struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

        _enter("");

        conn->proto.epoch       = sp->hdr.epoch;
        conn->proto.cid         = sp->hdr.cid & RXRPC_CIDMASK;
        conn->params.service_id = sp->hdr.serviceId;
        conn->service_id        = sp->hdr.serviceId;
        conn->security_ix       = sp->hdr.securityIndex;
        conn->out_clientflag    = 0;
        if (conn->security_ix)
                conn->state     = RXRPC_CONN_SERVICE_UNSECURED;
        else
                conn->state     = RXRPC_CONN_SERVICE;
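        /* A non-zero security index (e.g. rxkad) leaves the connection
         * unsecured until the challenge/response exchange has completed and
         * moved the state on to RXRPC_CONN_SERVICE.
         */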

        /* See if we should upgrade the service.  This can only happen on the
         * first packet on a new connection.  Once done, it applies to all
         * subsequent calls on that connection.
         */
        if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
            conn->service_id == rx->service_upgrade.from)
                conn->service_id = rx->service_upgrade.to;
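        /* A sketch of how the from/to pair gets set from userspace (values
         * are illustrative; see Documentation/networking/rxrpc.txt):
         *
         *      unsigned int ids[2] = { from_service, to_service };
         *      setsockopt(fd, SOL_RXRPC, RXRPC_UPGRADEABLE_SERVICE,
         *                 ids, sizeof(ids));
         */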

        /* Make the connection a target for incoming packets. */
        rxrpc_publish_service_conn(conn->params.peer, conn);

        _net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
}

/*
 * Remove the service connection from the peer's tree, thereby removing it as a
 * target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
        struct rxrpc_peer *peer = conn->params.peer;

        write_seqlock_bh(&peer->service_conn_lock);
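        /* Plain rb_erase() may rebalance the tree, which is not safe against
         * a purely lockless walk; bumping the seqcount here is what makes a
         * racing reader in rxrpc_find_service_conn_rcu() retry.
         */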
        if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
                rb_erase(&conn->service_node, &peer->service_conns);
        write_sequnlock_bh(&peer->service_conn_lock);
}