/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	unsigned long			copy;
	/* The extra two elements:
	 * 1) used for chaining the front and sections when the list becomes
	 *    partitioned (e.g. end < start). The crypto APIs require the
	 *    chaining;
	 * 2) to chain tailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};
static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
	SK_PSOCK_RX_STRP_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	struct sk_buff			*skb;
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	bool				redir_ingress; /* undefined if sk_redir is null */
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct work_struct		work;
	struct rcu_work			rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

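/* Illustrative sketch, not part of the upstream header: walking the used part
 * of the scatterlist ring with the iterator macros above. The ring is the
 * half-open range [sg.start, sg.end) modulo NR_MSG_FRAG_IDS, so the loop
 * visits each populated element exactly once even when end has wrapped below
 * start. The helper name sk_msg_example_ring_bytes is hypothetical.
 */
static inline u32 sk_msg_example_ring_bytes(const struct sk_msg *msg)
{
	u32 i = msg->sg.start;
	u32 bytes = 0;

	while (i != msg->sg.end) {
		bytes += msg->sg.data[i].length;
		sk_msg_iter_var_next(i);	/* wraps to 0 at NR_MSG_FRAG_IDS */
	}
	return bytes;
}
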
static inline void sk_msg_clear_meta(struct sk_msg *msg)
{
	memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
}

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length  = size;
	dst->sg.size		   += size;
	src->sg.size		   -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, &msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, &msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, &msg->sg.copy);
		else
			__clear_bit(i, &msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

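/* Illustrative sketch, not part of the upstream header: building a one
 * element message from a page fragment with the helpers above. Note that
 * sk_msg_page_add() takes its own page reference and marks the element in
 * the copy bitmap, so sk_msg_compute_data_pointers() leaves data and data_end
 * NULL until that bit is cleared (e.g. via sk_msg_sg_copy_clear()). The
 * helper name sk_msg_example_fill_from_page is hypothetical.
 */
static inline void sk_msg_example_fill_from_page(struct sk_msg *msg,
						 struct page *page, u32 len)
{
	sk_msg_init(msg);			/* zero the ring, mark the end entry */
	sk_msg_page_add(msg, page, len, 0);	/* element at sg.end, then end++ */
	sk_msg_compute_data_pointers(msg);	/* NULL here: copy bit set on start */
}
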
static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk,
							 SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
		list_add_tail(&msg->list, &psock->ingress_msg);
	else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

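/* Illustrative sketch, not part of the upstream header: draining whatever is
 * still queued on the psock ingress list, e.g. on teardown. Each queued
 * sk_msg holds charged memory that sk_msg_free() returns to the socket before
 * kfree_sk_msg() releases any pinned skb and the struct itself. The helper
 * name sk_psock_example_purge_ingress is hypothetical.
 */
static inline void sk_psock_example_purge_ingress(struct sk_psock *psock)
{
	struct sk_msg *msg;

	while ((msg = sk_psock_dequeue_msg(psock))) {
		sk_msg_free(psock->sk, msg);	/* uncharge and drop sg pages */
		kfree_sk_msg(msg);		/* consume msg->skb, free the msg */
	}
}
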
static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

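/* Illustrative sketch, not part of the upstream header: the usual reference
 * pattern around sk_psock_get()/sk_psock_put(). sk_psock_get() only returns a
 * psock whose refcount it managed to bump, so the caller may use it after the
 * RCU read section and must balance it with sk_psock_put(). The helper name
 * sk_psock_example_has_ingress is hypothetical.
 */
static inline bool sk_psock_example_has_ingress(struct sock *sk)
{
	struct sk_psock *psock = sk_psock_get(sk);
	bool nonempty;

	if (!psock)
		return false;		/* nothing attached, or it is being torn down */
	nonempty = !sk_psock_queue_empty(psock);
	sk_psock_put(sk, psock);	/* may free the psock via sk_psock_drop() */
	return nonempty;
}
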
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

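/* Illustrative sketch, not part of the upstream header: swapping the verdict
 * program on a psock. psock_set_prog() installs @prog unconditionally and
 * drops whatever was there, while psock_replace_prog() only succeeds if the
 * slot still holds the program the caller expects; either way the slot takes
 * over the caller's reference on @prog. The helper name
 * psock_example_swap_stream_verdict is hypothetical.
 */
static inline int psock_example_swap_stream_verdict(struct sk_psock *psock,
						    struct bpf_prog *expected,
						    struct bpf_prog *prog)
{
	if (expected)
		return psock_replace_prog(&psock->progs.stream_verdict,
					  prog, expected);
	psock_set_prog(&psock->progs.stream_verdict, prog);
	return 0;
}
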
int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}

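/* Illustrative sketch, not part of the upstream header: how the helpers above
 * pack a redirect socket pointer plus the BPF_F_INGRESS flag into the low
 * bits of skb->_sk_redir and later recover the pointer by masking those bits
 * off again with BPF_F_PTR_MASK. The helper name skb_bpf_example_redirect is
 * hypothetical.
 */
static inline struct sock *skb_bpf_example_redirect(struct sk_buff *skb,
						    struct sock *sk,
						    bool ingress)
{
	skb_bpf_set_redir(skb, sk, ingress);	/* pointer plus optional BPF_F_INGRESS */
	return skb_bpf_redirect_fetch(skb);	/* strips the flag bits, yields sk back */
}
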
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */