/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "name_table.h"

#define MAX_FORWARD_SIZE 1024
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16

static unsigned int align(unsigned int i)

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
        unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

        skb = alloc_skb_fclone(buf_size, gfp);
        skb_reserve(skb, BUF_HEADROOM);

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
                   u32 hsize, u32 dnode)
        msg_set_user(m, user);
        msg_set_hdr_sz(m, hsize);
        msg_set_size(m, hsize);
        msg_set_prevnode(m, own_node);
        msg_set_type(m, type);
        if (hsize > SHORT_H_SIZE) {
                msg_set_orignode(m, own_node);
                msg_set_destnode(m, dnode);

struct sk_buff *tipc_msg_create(uint user, uint type,
                                uint hdr_sz, uint data_sz, u32 dnode,
                                u32 onode, u32 dport, u32 oport, int errcode)
        buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
        tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
        msg_set_size(msg, hdr_sz + data_sz);
        msg_set_origport(msg, oport);
        msg_set_destport(msg, dport);
        msg_set_errcode(msg, errcode);
        if (hdr_sz > SHORT_H_SIZE) {
                msg_set_orignode(msg, onode);
                msg_set_destnode(msg, dnode);

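/* Editor's note: illustrative sketch, not part of the original file. It shows
 * a typical caller-side use of tipc_msg_create(): building a small,
 * self-contained control message and queueing it for transmission. The
 * function name and the 'txq' queue are hypothetical; CONN_MANAGER and
 * CONN_PROBE are only example user/type values.
 */
static bool example_queue_conn_probe(struct sk_buff_head *txq, u32 dnode,
                                     u32 onode, u32 dport, u32 oport)
{
        struct sk_buff *skb;

        /* One buffer holding a header only (no user data), error code 0 */
        skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
                              dnode, onode, dport, oport, TIPC_OK);
        if (!skb)
                return false;
        __skb_queue_tail(txq, skb);
        return true;
}
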
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in: the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
        struct sk_buff *head = *headbuf;
        struct sk_buff *frag = *buf;
        struct sk_buff *tail = NULL;
        struct tipc_msg *msg;

        fragid = msg_type(msg);
        skb_pull(frag, msg_hdr_sz(msg));

        if (fragid == FIRST_FRAGMENT) {
                if (skb_has_frag_list(frag) && __skb_linearize(frag))
                frag = skb_unshare(frag, GFP_ATOMIC);
                head = *headbuf = frag;
                TIPC_SKB_CB(head)->tail = NULL;

        if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
                kfree_skb_partial(frag, headstolen);
                tail = TIPC_SKB_CB(head)->tail;
                if (!skb_has_frag_list(head))
                        skb_shinfo(head)->frag_list = frag;
                head->truesize += frag->truesize;
                head->data_len += frag->len;
                head->len += frag->len;
                TIPC_SKB_CB(head)->tail = frag;

        if (fragid == LAST_FRAGMENT) {
                TIPC_SKB_CB(head)->validated = false;
                if (unlikely(!tipc_msg_validate(&head)))
                TIPC_SKB_CB(head)->tail = NULL;

        *buf = *headbuf = NULL;

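/* Editor's note: illustrative sketch, not part of the original file. It shows
 * the intended calling pattern for tipc_buf_append(): feed fragments one at a
 * time, keeping 'head' as opaque reassembly state between calls. The function
 * name and the 'fragq' queue are hypothetical.
 */
static struct sk_buff *example_reassemble(struct sk_buff_head *fragq)
{
        struct sk_buff *head = NULL;    /* must be NULL before the first frag */
        struct sk_buff *frag;

        while ((frag = __skb_dequeue(fragq))) {
                if (tipc_buf_append(&head, &frag))
                        return frag;    /* complete message returned in 'frag' */
                /* not complete yet: state is kept in 'head', 'frag' is NULL */
        }
        kfree_skb(head);                /* drop any incomplete reassembly state */
        return NULL;
}
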
/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
        struct sk_buff *skb = *_skb;
        struct tipc_msg *hdr;

        /* Ensure that flow control ratio condition is satisfied */
        if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
                skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);

        if (unlikely(TIPC_SKB_CB(skb)->validated))
        if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))

        hsz = msg_hdr_sz(buf_msg(skb));
        if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
        if (unlikely(!pskb_may_pull(skb, hsz)))

        if (unlikely(msg_version(hdr) != TIPC_VERSION))

        if (unlikely(msz < hsz))
        if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
        if (unlikely(skb->len < msz))

        TIPC_SKB_CB(skb)->validated = true;

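/* Editor's note: illustrative sketch, not part of the original file. Callers
 * pass the address of their skb pointer because tipc_msg_validate() may
 * replace the buffer (see the skb_copy_expand() path above). The function
 * name is hypothetical.
 */
static struct tipc_msg *example_check_rcv(struct sk_buff **skb)
{
        if (unlikely(!tipc_msg_validate(skb))) {
                kfree_skb(*skb);        /* malformed: the caller drops it */
                *skb = NULL;
                return NULL;
        }
        /* header is now pulled into the linear area and safe to access */
        return buf_msg(*skb);
}
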
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                   int dsz, int pktmax, struct sk_buff_head *list)
        int mhsz = msg_hdr_sz(mhdr);
        struct tipc_msg pkthdr;
        int msz = mhsz + dsz;

        msg_set_size(mhdr, msz);

        /* No fragmentation needed? */
        if (likely(msz <= pktmax)) {
                skb = tipc_buf_acquire(msz, GFP_KERNEL);

                /* Fall back to smaller MTU if node local message */
                if (unlikely(!skb)) {
                        if (pktmax != MAX_MSG_SIZE)
                        rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
                        if (tipc_msg_assemble(list))
                __skb_queue_tail(list, skb);
                skb_copy_to_linear_data(skb, mhdr, mhsz);
                pktpos = skb->data + mhsz;
                if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))

        /* Prepare reusable fragment header */
        tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
                      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
        msg_set_size(&pkthdr, pktmax);
        msg_set_fragm_no(&pkthdr, pktno);
        msg_set_importance(&pkthdr, msg_importance(mhdr));

        /* Prepare first fragment */
        skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
        __skb_queue_tail(list, skb);
        skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
        pktpos += INT_H_SIZE;
        pktrem -= INT_H_SIZE;
        skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);

                if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {

                /* Prepare new fragment: */
                if (drem < (pktmax - INT_H_SIZE))
                        pktsz = drem + INT_H_SIZE;
                skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
                __skb_queue_tail(list, skb);
                msg_set_type(&pkthdr, FRAGMENT);
                msg_set_size(&pkthdr, pktsz);
                msg_set_fragm_no(&pkthdr, ++pktno);
                skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
                pktpos = skb->data + INT_H_SIZE;
                pktrem = pktsz - INT_H_SIZE;

        msg_set_type(buf_msg(skb), LAST_FRAGMENT);

        __skb_queue_purge(list);
        __skb_queue_head_init(list);

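/* Editor's note: illustrative sketch, not part of the original file. It shows
 * how a sender typically drives tipc_msg_build(): initialize a user-data
 * header once, then let the function copy the payload from the msghdr
 * iterator and fragment it when it does not fit into one packet of size
 * 'mtu'. The function name is hypothetical; TIPC_LOW_IMPORTANCE and
 * TIPC_DIRECT_MSG are only example user/type values.
 */
static int example_build_direct_msg(struct msghdr *m, int dsz, u32 onode,
                                    u32 dnode, int mtu,
                                    struct sk_buff_head *txq)
{
        struct tipc_msg hdr;

        tipc_msg_init(onode, &hdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
                      BASIC_H_SIZE, dnode);
        __skb_queue_head_init(txq);

        /* Returns dsz on success or -ENOMEM/-EFAULT; on success 'txq' holds
         * either a single buffer or a FIRST_/FRAGMENT/LAST_FRAGMENT chain.
         */
        return tipc_msg_build(&hdr, m, 0, dsz, mtu, txq);
}
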
/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @skb: the buffer to append to ("bundle")
 * @msg: message to be appended
 * @mtu: max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
        struct tipc_msg *bmsg;
        unsigned int msz = msg_size(msg);
        u32 max = mtu - INT_H_SIZE;

        if (likely(msg_user(msg) == MSG_FRAGMENTER))
        bsz = msg_size(bmsg);
        if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
        if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
        if (unlikely(skb_tailroom(skb) < (pad + msz)))
        if (unlikely(max < (start + msz)))
        if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
            (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))

        skb_put(skb, pad + msz);
        skb_copy_to_linear_data_offset(skb, start, msg, msz);
        msg_set_size(bmsg, start + msz);
        msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted.
 *       Returns position of next msg
 * Consumes outer buffer when last packet extracted
 * Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
        struct tipc_msg *hdr, *ihdr;

        if (unlikely(skb_linearize(skb)))
        if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))

        ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
        imsz = msg_size(ihdr);

        if ((*pos + imsz) > msg_data_sz(hdr))

        *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
        skb_copy_to_linear_data(*iskb, ihdr, imsz);
        if (unlikely(!tipc_msg_validate(iskb)))

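/* Editor's note: illustrative sketch, not part of the original file. It shows
 * the usual way to unpack a MSG_BUNDLER buffer with tipc_msg_extract(): keep
 * calling with the same 'pos' cursor until it returns false, at which point
 * the outer buffer has been consumed. The function name and 'inputq' are
 * hypothetical.
 */
static void example_unbundle(struct sk_buff *skb, struct sk_buff_head *inputq)
{
        struct sk_buff *iskb;
        int pos = 0;

        while (tipc_msg_extract(skb, &iskb, &pos))
                __skb_queue_tail(inputq, iskb);
        /* 'skb' has been freed by the failing (final) extraction call */
}
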
/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @list: the buffer chain, where head is the buffer to replace/append
 * @skb: buffer to be created, appended to and returned in case of success
 * @msg: message to be appended
 * @mtu: max allowable size for the bundle buffer, inclusive header
 * @dnode: destination node for message. (Not always present in header)
 * Returns true if success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
                          u32 mtu, u32 dnode)
        struct sk_buff *_skb;
        struct tipc_msg *bmsg;
        u32 msz = msg_size(msg);
        u32 max = mtu - INT_H_SIZE;

        if (msg_user(msg) == MSG_FRAGMENTER)
        if (msg_user(msg) == TUNNEL_PROTOCOL)
        if (msg_user(msg) == BCAST_PROTOCOL)

        _skb = tipc_buf_acquire(max, GFP_ATOMIC);
        skb_trim(_skb, INT_H_SIZE);
        bmsg = buf_msg(_skb);
        tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
                      INT_H_SIZE, dnode);
        msg_set_importance(bmsg, msg_importance(msg));
        msg_set_seqno(bmsg, msg_seqno(msg));
        msg_set_ack(bmsg, msg_ack(msg));
        msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
        tipc_msg_bundle(_skb, msg, mtu);

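/* Editor's note: illustrative sketch, not part of the original file. It
 * mirrors the usual caller pattern for the two functions above: first try to
 * append the message to a bundle already waiting at the tail of a backlog
 * queue, otherwise try to open a new bundle for it. The function name and
 * 'backlogq' are hypothetical.
 */
static bool example_try_bundle(struct sk_buff_head *backlogq,
                               struct sk_buff *skb, u32 mtu, u32 dnode)
{
        struct tipc_msg *msg = buf_msg(skb);
        struct sk_buff *tail = skb_peek_tail(backlogq);
        struct sk_buff *bskb = NULL;

        /* Append into the existing bundle buffer at the queue tail, if any */
        if (tail && tipc_msg_bundle(tail, msg, mtu)) {
                kfree_skb(skb);         /* contents were copied into 'tail' */
                return true;
        }
        /* Otherwise create a fresh MSG_BUNDLER buffer around this message */
        if (tipc_msg_make_bundle(&bskb, msg, mtu, dnode)) {
                kfree_skb(skb);         /* contents were copied into 'bskb' */
                __skb_queue_tail(backlogq, bskb);
                return true;
        }
        return false;                   /* too big or not bundlable: send as-is */
}
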
/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; may be replaced.
 * @err: error code to be set in message, if any
 * Consumes buffer at failure
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
        struct sk_buff *_skb = *skb;
        struct tipc_msg *hdr;
        struct tipc_msg ohdr;

        if (skb_linearize(_skb))
        dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
        if (msg_dest_droppable(hdr))
        if (msg_errcode(hdr))

        /* Take a copy of original header before altering message */
        memcpy(&ohdr, hdr, msg_hdr_sz(hdr));

        /* Never return SHORT header; expand by replacing buffer if necessary */
        if (msg_short(hdr)) {
                *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
                memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
                memcpy(hdr, &ohdr, BASIC_H_SIZE);
                msg_set_hdr_sz(hdr, BASIC_H_SIZE);

        /* Now reverse the concerned fields */
        msg_set_errcode(hdr, err);
        msg_set_non_seq(hdr, 0);
        msg_set_origport(hdr, msg_destport(&ohdr));
        msg_set_destport(hdr, msg_origport(&ohdr));
        msg_set_destnode(hdr, msg_prevnode(&ohdr));
        msg_set_prevnode(hdr, own_node);
        msg_set_orignode(hdr, own_node);
        msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
        skb_trim(_skb, msg_size(hdr));

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
        struct tipc_msg *msg = buf_msg(skb);
        u32 onode = tipc_own_addr(net);

        if (!msg_isdata(msg))
        if (msg_errcode(msg))
        *err = TIPC_ERR_NO_NAME;
        if (skb_linearize(skb))
        if (msg_reroute_cnt(msg))
        dnode = tipc_scope2node(net, msg_lookup_scope(msg));
        dport = tipc_nametbl_translate(net, msg_nametype(msg),
                                       msg_nameinst(msg), &dnode);
        msg_incr_reroute_cnt(msg);
        msg_set_prevnode(msg, onode);
        msg_set_destnode(msg, dnode);
        msg_set_destport(msg, dport);

        if (!skb_cloned(skb))

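/* Editor's note: illustrative sketch, not part of the original file. It
 * mirrors how a receive path can use tipc_msg_lookup_dest() as a secondary
 * name-table lookup before rejecting an undeliverable message. The function
 * name is hypothetical.
 */
static bool example_redirect_or_reject(struct net *net, struct sk_buff *skb,
                                       struct sk_buff_head *xmitq)
{
        int err = TIPC_ERR_NO_PORT;

        /* Secondary name lookup: rewrites destination node/port on a hit */
        if (tipc_msg_lookup_dest(net, skb, &err))
                return true;    /* caller forwards 'skb' to its new destination */

        /* No new destination: bounce the message back towards its sender */
        tipc_skb_reject(net, err, skb, xmitq);
        return false;
}
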
/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
        struct sk_buff *skb, *tmp = NULL;

        if (skb_queue_len(list) == 1)

        while ((skb = __skb_dequeue(list))) {
                if (tipc_buf_append(&tmp, &skb)) {
                        __skb_queue_tail(list, skb);

        __skb_queue_purge(list);
        __skb_queue_head_init(list);
        pr_warn("Failed to assemble buffer\n");

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 * reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
        struct sk_buff *skb, *_skb;
        struct sk_buff *frag = NULL;
        struct sk_buff *head = NULL;

        /* Copy header if single buffer */
        if (skb_queue_len(list) == 1) {
                skb = skb_peek(list);
                hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
                _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
                __skb_queue_tail(rcvq, _skb);

        /* Clone all fragments and reassemble */
        skb_queue_walk(list, skb) {
                frag = skb_clone(skb, GFP_ATOMIC);
                if (tipc_buf_append(&head, &frag))

        __skb_queue_tail(rcvq, frag);
        pr_warn("Failed to clone local mcast rcv buffer\n");

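/* Editor's note: illustrative sketch, not part of the original file. On the
 * multicast send path, the outgoing fragment chain can also be handed to
 * local receivers; tipc_msg_reassemble() builds that copy from clones, so the
 * original chain stays intact for transmission. Names below are hypothetical.
 */
static bool example_deliver_local_copy(struct sk_buff_head *txq,
                                       struct sk_buff_head *local_rcvq)
{
        /* On success 'local_rcvq' holds one reassembled message built from
         * clones of the buffers in 'txq'; 'txq' itself is not modified.
         */
        return tipc_msg_reassemble(txq, local_rcvq);
}
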
/* tipc_msg_pskb_copy() - copy a buffer chain and retarget it to node 'dst' */
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
                        struct sk_buff_head *cpy)
        struct sk_buff *skb, *_skb;

        skb_queue_walk(msg, skb) {
                _skb = pskb_copy(skb, GFP_ATOMIC);
                        __skb_queue_purge(cpy);
                msg_set_destnode(buf_msg(_skb), dst);
                __skb_queue_tail(cpy, _skb);

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
                             struct sk_buff *skb)
        struct sk_buff *_skb, *tmp;

        if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
                __skb_queue_head(list, skb);

        if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
                __skb_queue_tail(list, skb);

        skb_queue_walk_safe(list, _skb, tmp) {
                if (more(seqno, buf_seqno(_skb)))
                if (seqno == buf_seqno(_skb))
                __skb_queue_before(list, _skb, skb);

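/* Editor's note: illustrative sketch, not part of the original file. A
 * receiver can use __tipc_skb_queue_sorted() to park out-of-order packets in
 * a deferred queue ordered by sequence number; duplicate sequence numbers are
 * dropped inside the function. The function name and 'deferdq' are
 * hypothetical.
 */
static void example_defer_out_of_order(struct sk_buff_head *deferdq,
                                       struct sk_buff *skb)
{
        __tipc_skb_queue_sorted(deferdq, buf_seqno(skb), skb);
}
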
/* tipc_skb_reject(): reverse a rejected message and queue it for return to sender */
void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
                     struct sk_buff_head *xmitq)
        if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
                __skb_queue_tail(xmitq, skb);