/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define MAX_FORWARD_SIZE 1024

#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_OVERHEAD BUF_HEADROOM
#endif

const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

static unsigned int align(unsigned int i)
{
        return (i + 3) & ~3u;
}
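
/* Example (not part of the original file): align() rounds up to the next
 * 4-byte boundary, so align(53) == 56 and align(56) == 56. tipc_msg_bundle()
 * below relies on this to compute the padding inserted between bundled
 * messages: pad = align(bsz) - bsz.
 */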

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
        struct sk_buff *skb;

        skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
        if (skb) {
                skb_reserve(skb, BUF_HEADROOM);
                skb_put(skb, size);
                skb->next = NULL;
        }
        return skb;
}
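
/* Illustrative sketch (not part of the original file; own_addr, peer_addr and
 * dlen are hypothetical): a typical caller sizes the buffer for header plus
 * data, then fills in the header via buf_msg(). The headroom reserved above
 * lets a data link (and, with CONFIG_TIPC_CRYPTO, an encryption) header be
 * prepended later without reallocation:
 *
 *      struct sk_buff *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
 *
 *      if (skb)
 *              tipc_msg_init(own_addr, buf_msg(skb), TIPC_LOW_IMPORTANCE,
 *                            TIPC_DIRECT_MSG, BASIC_H_SIZE, peer_addr);
 */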

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
                   u32 hsize, u32 dnode)
{
        memset(m, 0, hsize);
        msg_set_version(m);
        msg_set_user(m, user);
        msg_set_hdr_sz(m, hsize);
        msg_set_size(m, hsize);
        msg_set_prevnode(m, own_node);
        msg_set_type(m, type);
        if (hsize > SHORT_H_SIZE) {
                msg_set_orignode(m, own_node);
                msg_set_destnode(m, dnode);
        }
}

struct sk_buff *tipc_msg_create(uint user, uint type,
                                uint hdr_sz, uint data_sz, u32 dnode,
                                u32 onode, u32 dport, u32 oport, int errcode)
{
        struct tipc_msg *msg;
        struct sk_buff *buf;

        buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
        if (unlikely(!buf))
                return NULL;

        msg = buf_msg(buf);
        tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
        msg_set_size(msg, hdr_sz + data_sz);
        msg_set_origport(msg, oport);
        msg_set_destport(msg, dport);
        msg_set_errcode(msg, errcode);
        if (hdr_sz > SHORT_H_SIZE) {
                msg_set_orignode(msg, onode);
                msg_set_destnode(msg, dnode);
        }
        return buf;
}
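
/* Illustrative sketch (hypothetical variables, not from the original file):
 * connection-level code can build a payload-free control message in one
 * call, e.g. a connection acknowledgment:
 *
 *      skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
 *                            dnode, onode, dport, oport, TIPC_OK);
 */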

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
        struct sk_buff *head = *headbuf;
        struct sk_buff *frag = *buf;
        struct sk_buff *tail = NULL;
        struct tipc_msg *msg;
        u32 fragid;
        int delta;
        bool headstolen;

        if (!frag)
                goto err;

        msg = buf_msg(frag);
        fragid = msg_type(msg);
        frag->next = NULL;
        skb_pull(frag, msg_hdr_sz(msg));

        if (fragid == FIRST_FRAGMENT) {
                if (unlikely(head))
                        goto err;
                *buf = NULL;
                if (skb_has_frag_list(frag) && __skb_linearize(frag))
                        goto err;
                frag = skb_unshare(frag, GFP_ATOMIC);
                if (unlikely(!frag))
                        goto err;
                head = *headbuf = frag;
                TIPC_SKB_CB(head)->tail = NULL;
                return 0;
        }

        if (!head)
                goto err;

        /* Either the input skb ownership is transferred to headskb
         * or the input skb is freed, clear the reference to avoid
         * bad access on error path.
         */
        *buf = NULL;
        if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
                kfree_skb_partial(frag, headstolen);
        } else {
                tail = TIPC_SKB_CB(head)->tail;
                if (!skb_has_frag_list(head))
                        skb_shinfo(head)->frag_list = frag;
                else
                        tail->next = frag;
                head->truesize += frag->truesize;
                head->data_len += frag->len;
                head->len += frag->len;
                TIPC_SKB_CB(head)->tail = frag;
        }

        if (fragid == LAST_FRAGMENT) {
                TIPC_SKB_CB(head)->validated = 0;
                if (unlikely(!tipc_msg_validate(&head)))
                        goto err;
                *buf = head;
                TIPC_SKB_CB(head)->tail = NULL;
                *headbuf = NULL;
                return 1;
        }
        return 0;
err:
        kfree_skb(*buf);
        kfree_skb(*headbuf);
        *buf = *headbuf = NULL;
        return 0;
}
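
/* Illustrative sketch (next_fragment() and deliver() are hypothetical): a
 * receiver feeds fragments to tipc_buf_append() one at a time, keeping the
 * partial reassembly head between calls. On completion the function itself
 * clears the head pointer and hands back the full message in 'skb':
 *
 *      struct sk_buff *reasm = NULL;
 *
 *      while ((skb = next_fragment()))
 *              if (tipc_buf_append(&reasm, &skb))
 *                      deliver(skb);
 */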

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 * Returns the number of 1k blocks appended or errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
                    int mss, struct sk_buff_head *txq)
{
        struct sk_buff *skb;
        int accounted, total, curr;
        int mlen, cpy, rem = dlen;
        struct tipc_msg *hdr;

        skb = skb_peek_tail(txq);
        accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
        total = accounted;

        do {
                if (!skb || skb->len >= mss) {
                        skb = tipc_buf_acquire(mss, GFP_KERNEL);
                        if (unlikely(!skb))
                                return -ENOMEM;
                        skb_orphan(skb);
                        skb_trim(skb, MIN_H_SIZE);
                        hdr = buf_msg(skb);
                        skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
                        msg_set_hdr_sz(hdr, MIN_H_SIZE);
                        msg_set_size(hdr, MIN_H_SIZE);
                        __skb_queue_tail(txq, skb);
                        total += 1;
                }
                hdr = buf_msg(skb);
                curr = msg_blocks(hdr);
                mlen = msg_size(hdr);
                cpy = min_t(size_t, rem, mss - mlen);
                if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
                        return -EFAULT;
                msg_set_size(hdr, mlen + cpy);
                skb_put(skb, cpy);
                rem -= cpy;
                total += msg_blocks(hdr) - curr;
        } while (rem > 0);
        return total - accounted;
}
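
/* The value returned above is the growth in flow control blocks (1k units as
 * counted by msg_blocks()) caused by this append. Illustrative caller sketch
 * (hypothetical variables), as used from the socket send path:
 *
 *      rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
 *      if (rc > 0)
 *              blocks += rc;
 */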

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
        struct sk_buff *skb = *_skb;
        struct tipc_msg *hdr;
        int msz, hsz;

        /* Ensure that flow control ratio condition is satisfied */
        if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
                skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
                if (!skb)
                        return false;
                kfree_skb(*_skb);
                *_skb = skb;
        }

        if (unlikely(TIPC_SKB_CB(skb)->validated))
                return true;

        if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
                return false;

        hsz = msg_hdr_sz(buf_msg(skb));
        if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
                return false;
        if (unlikely(!pskb_may_pull(skb, hsz)))
                return false;

        hdr = buf_msg(skb);
        if (unlikely(msg_version(hdr) != TIPC_VERSION))
                return false;

        msz = msg_size(hdr);
        if (unlikely(msz < hsz))
                return false;
        if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
                return false;
        if (unlikely(skb->len < msz))
                return false;

        TIPC_SKB_CB(skb)->validated = 1;
        return true;
}
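
/* Note on the ratio check above: a buffer whose truesize exceeds four times
 * its rounded-up data length would inflate socket receive-buffer accounting,
 * so it is replaced by a compact copy before being marked as validated.
 */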

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Returns 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
                      int pktmax, struct sk_buff_head *frags)
{
        int pktno, nof_fragms, dsz, dmax, eat;
        struct tipc_msg *_hdr;
        struct sk_buff *_skb;
        u8 *data;

        /* Non-linear buffer? */
        if (skb_linearize(skb))
                return -ENOMEM;

        data = (u8 *)skb->data;
        dsz = msg_size(buf_msg(skb));
        dmax = pktmax - INT_H_SIZE;
        if (dsz <= dmax || !dmax)
                return -EINVAL;

        nof_fragms = dsz / dmax + 1;
        for (pktno = 1; pktno <= nof_fragms; pktno++) {
                if (pktno < nof_fragms)
                        eat = dmax;
                else
                        eat = dsz % dmax;
                /* Allocate a new fragment */
                _skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
                if (!_skb)
                        goto error;
                skb_orphan(_skb);
                __skb_queue_tail(frags, _skb);
                /* Copy header & data to the fragment */
                skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
                skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
                data += eat;
                /* Update the fragment's header */
                _hdr = buf_msg(_skb);
                msg_set_fragm_no(_hdr, pktno);
                msg_set_nof_fragms(_hdr, nof_fragms);
                msg_set_size(_hdr, INT_H_SIZE + eat);
        }
        return 0;

error:
        __skb_queue_purge(frags);
        __skb_queue_head_init(frags);
        return -ENOMEM;
}
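
/* Worked example of the arithmetic above (hypothetical sizes): with
 * pktmax = 1500 and a message of dsz = 3000 bytes, dmax = 1500 - INT_H_SIZE =
 * 1460 (INT_H_SIZE being 40), so nof_fragms = 3000 / 1460 + 1 = 3; the first
 * two fragments eat 1460 bytes each and the last eats 3000 % 1460 = 80.
 */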

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: Position in iov to start copying from
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                   int dsz, int pktmax, struct sk_buff_head *list)
{
        int mhsz = msg_hdr_sz(mhdr);
        struct tipc_msg pkthdr;
        int msz = mhsz + dsz;
        int pktrem = pktmax;
        struct sk_buff *skb;
        int drem = dsz;
        int pktno = 1;
        char *pktpos;
        int pktsz;
        int rc;

        msg_set_size(mhdr, msz);

        /* No fragmentation needed? */
        if (likely(msz <= pktmax)) {
                skb = tipc_buf_acquire(msz, GFP_KERNEL);

                /* Fall back to smaller MTU if node local message */
                if (unlikely(!skb)) {
                        if (pktmax != MAX_MSG_SIZE)
                                return -ENOMEM;
                        rc = tipc_msg_build(mhdr, m, offset, dsz,
                                            one_page_mtu, list);
                        if (rc != dsz)
                                return rc;
                        if (tipc_msg_assemble(list))
                                return dsz;
                        return -ENOMEM;
                }
                skb_orphan(skb);
                __skb_queue_tail(list, skb);
                skb_copy_to_linear_data(skb, mhdr, mhsz);
                pktpos = skb->data + mhsz;
                if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
                        return dsz;
                rc = -EFAULT;
                goto error;
        }

        /* Prepare reusable fragment header */
        tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
                      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
        msg_set_size(&pkthdr, pktmax);
        msg_set_fragm_no(&pkthdr, pktno);
        msg_set_importance(&pkthdr, msg_importance(mhdr));

        /* Prepare first fragment */
        skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        skb_orphan(skb);
        __skb_queue_tail(list, skb);
        pktpos = skb->data;
        skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
        pktpos += INT_H_SIZE;
        pktrem -= INT_H_SIZE;
        skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
        pktpos += mhsz;
        pktrem -= mhsz;

        do {
                if (drem < pktrem)
                        pktrem = drem;

                if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
                        rc = -EFAULT;
                        goto error;
                }
                drem -= pktrem;

                if (!drem)
                        break;

                /* Prepare new fragment: */
                if (drem < (pktmax - INT_H_SIZE))
                        pktsz = drem + INT_H_SIZE;
                else
                        pktsz = pktmax;
                skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
                if (!skb) {
                        rc = -ENOMEM;
                        goto error;
                }
                skb_orphan(skb);
                __skb_queue_tail(list, skb);
                msg_set_type(&pkthdr, FRAGMENT);
                msg_set_size(&pkthdr, pktsz);
                msg_set_fragm_no(&pkthdr, ++pktno);
                skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
                pktpos = skb->data + INT_H_SIZE;
                pktrem = pktsz - INT_H_SIZE;
        } while (1);
        msg_set_type(buf_msg(skb), LAST_FRAGMENT);
        return dsz;
error:
        __skb_queue_purge(list);
        __skb_queue_head_init(list);
        return rc;
}
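
/* Illustrative caller sketch (hypothetical variables): the socket send path
 * builds the buffer chain against the link MTU and passes it on for
 * transmission:
 *
 *      rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
 *      if (rc == dlen)
 *              rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
 */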

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Returns "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
                            u32 max)
{
        struct tipc_msg *bmsg = buf_msg(bskb);
        u32 msz, bsz, offset, pad;

        msz = msg_size(msg);
        bsz = msg_size(bmsg);
        offset = align(bsz);
        pad = offset - bsz;

        if (unlikely(skb_tailroom(bskb) < (pad + msz)))
                return false;
        if (unlikely(max < (offset + msz)))
                return false;

        skb_put(bskb, pad + msz);
        skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
        msg_set_size(bmsg, offset + msz);
        msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
        return true;
}
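
/* Example of the padding above (hypothetical sizes): if the bundle currently
 * holds bsz = 125 bytes, the next inner message starts at offset =
 * align(125) = 128, so pad = 3 and the bundle grows by pad + msz bytes.
 */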

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is eligible for bundling, either now
 * or later; if bundling was performed this time, the skb has been consumed
 * and the skb pointer set to NULL.
 * Otherwise, "false" if the skb cannot be bundled at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
                         u32 dnode, bool *new_bundle)
{
        struct tipc_msg *msg, *inner, *outer;
        u32 tsz;

        /* First, check if the new buffer is suitable for bundling */
        msg = buf_msg(*skb);
        if (msg_user(msg) == MSG_FRAGMENTER)
                return false;
        if (msg_user(msg) == TUNNEL_PROTOCOL)
                return false;
        if (msg_user(msg) == BCAST_PROTOCOL)
                return false;
        if (mss <= INT_H_SIZE + msg_size(msg))
                return false;

        /* Ok, but the last/target buffer can be empty? */
        if (unlikely(!tskb))
                return true;

        /* Is it a bundle already? Try to bundle the new message to it */
        if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
                *new_bundle = false;
                goto bundle;
        }

        /* Make a new bundle of the two messages if possible */
        tsz = msg_size(buf_msg(tskb));
        if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
                return true;
        if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
                                      GFP_ATOMIC)))
                return true;
        inner = buf_msg(tskb);
        skb_push(tskb, INT_H_SIZE);
        outer = buf_msg(tskb);
        tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
                      dnode);
        msg_set_importance(outer, msg_importance(inner));
        msg_set_size(outer, INT_H_SIZE + tsz);
        msg_set_msgcnt(outer, 1);
        *new_bundle = true;

bundle:
        if (likely(tipc_msg_bundle(tskb, msg, mss))) {
                consume_skb(*skb);
                *skb = NULL;
        }
        return true;
}
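
/* Illustrative caller pattern (hypothetical variables): the link layer tries
 * to bundle each outgoing message onto the backlog tail; a consumed skb
 * (pointer set to NULL) was absorbed into the bundle:
 *
 *      if (tipc_msg_try_bundle(tskb, &skb, mss, l->addr, &new_bundle)) {
 *              if (!skb)
 *                      return 0;
 *              __skb_queue_tail(backlogq, skb);
 *      }
 */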

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted; updated to the
 *       position of the next msg on return
 * Consumes outer buffer when last packet extracted
 * Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
        struct tipc_msg *hdr, *ihdr;
        int imsz;

        *iskb = NULL;
        if (unlikely(skb_linearize(skb)))
                goto none;

        hdr = buf_msg(skb);
        if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
                goto none;

        ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
        imsz = msg_size(ihdr);

        if ((*pos + imsz) > msg_data_sz(hdr))
                goto none;

        *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
        if (!*iskb)
                goto none;

        skb_copy_to_linear_data(*iskb, ihdr, imsz);
        if (unlikely(!tipc_msg_validate(iskb)))
                goto none;

        *pos += align(imsz);
        return true;
none:
        kfree_skb(skb);
        kfree_skb(*iskb);
        *iskb = NULL;
        return false;
}
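
/* Illustrative sketch (deliver() is hypothetical): a receiver unpacks a
 * bundle by calling tipc_msg_extract() repeatedly; 'pos' advances past each
 * aligned inner message, and the outer buffer is consumed when no further
 * message can be extracted:
 *
 *      int pos = 0;
 *
 *      while (tipc_msg_extract(skb, &iskb, &pos))
 *              deliver(iskb);
 */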

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
        struct sk_buff *_skb = *skb;
        struct tipc_msg *_hdr, *hdr;
        int hlen, dlen;

        if (skb_linearize(_skb))
                goto exit;
        _hdr = buf_msg(_skb);
        dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
        hlen = msg_hdr_sz(_hdr);

        if (msg_dest_droppable(_hdr))
                goto exit;
        if (msg_errcode(_hdr))
                goto exit;

        /* Never return SHORT header */
        if (hlen == SHORT_H_SIZE)
                hlen = BASIC_H_SIZE;

        /* Don't return data along with SYN+, - sender has a clone */
        if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
                dlen = 0;

        /* Allocate new buffer to return */
        *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
        if (!*skb)
                goto exit;
        memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
        memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

        /* Build reverse header in new buffer */
        hdr = buf_msg(*skb);
        msg_set_hdr_sz(hdr, hlen);
        msg_set_errcode(hdr, err);
        msg_set_non_seq(hdr, 0);
        msg_set_origport(hdr, msg_destport(_hdr));
        msg_set_destport(hdr, msg_origport(_hdr));
        msg_set_destnode(hdr, msg_prevnode(_hdr));
        msg_set_prevnode(hdr, own_node);
        msg_set_orignode(hdr, own_node);
        msg_set_size(hdr, hlen + dlen);
        skb_orphan(_skb);
        kfree_skb(_skb);
        return true;
exit:
        kfree_skb(_skb);
        *skb = NULL;
        return false;
}
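
/* For an in-tree example of this reversal in use, see tipc_skb_reject() at
 * the end of this file: undeliverable messages are reversed in place and
 * queued for transmission back to the sender.
 */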

bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
        struct sk_buff *skb, *_skb;

        skb_queue_walk(msg, skb) {
                _skb = skb_clone(skb, GFP_ATOMIC);
                if (!_skb) {
                        __skb_queue_purge(cpy);
                        pr_err_ratelimited("Failed to clone buffer chain\n");
                        return false;
                }
                __skb_queue_tail(cpy, _skb);
        }
        return true;
}

/* tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
        struct tipc_msg *msg = buf_msg(skb);
        u32 dport, dnode;
        u32 onode = tipc_own_addr(net);

        if (!msg_isdata(msg))
                return false;
        if (!msg_named(msg))
                return false;
        if (msg_errcode(msg))
                return false;
        *err = TIPC_ERR_NO_NAME;
        if (skb_linearize(skb))
                return false;
        msg = buf_msg(skb);
        if (msg_reroute_cnt(msg))
                return false;
        dnode = tipc_scope2node(net, msg_lookup_scope(msg));
        dport = tipc_nametbl_translate(net, msg_nametype(msg),
                                       msg_nameinst(msg), &dnode);
        if (!dport)
                return false;
        msg_incr_reroute_cnt(msg);
        if (dnode != onode)
                msg_set_prevnode(msg, onode);
        msg_set_destnode(msg, dnode);
        msg_set_destport(msg, dport);
        *err = TIPC_OK;

        return true;
}
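
/* Note: this secondary name table lookup runs on the receiving node, so a
 * name published after the sender's own lookup can still be resolved here;
 * the msg_reroute_cnt() check above caps this rerouting at a single hop.
 */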

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp = NULL;

        if (skb_queue_len(list) == 1)
                return true;

        while ((skb = __skb_dequeue(list))) {
                skb->next = NULL;
                if (tipc_buf_append(&tmp, &skb)) {
                        __skb_queue_tail(list, skb);
                        return true;
                }
                if (!tmp)
                        break;
        }
        __skb_queue_purge(list);
        __skb_queue_head_init(list);
        pr_warn("Failed to assemble buffer\n");
        return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
        struct sk_buff *skb, *_skb;
        struct sk_buff *frag = NULL;
        struct sk_buff *head = NULL;
        int hdr_len;

        /* Copy header if single buffer */
        if (skb_queue_len(list) == 1) {
                skb = skb_peek(list);
                hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
                _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
                if (!_skb)
                        return false;
                __skb_queue_tail(rcvq, _skb);
                return true;
        }

        /* Clone all fragments and reassemble */
        skb_queue_walk(list, skb) {
                frag = skb_clone(skb, GFP_ATOMIC);
                if (!frag)
                        goto error;
                frag->next = NULL;
                if (tipc_buf_append(&head, &frag))
                        break;
                if (!head)
                        goto error;
        }
        __skb_queue_tail(rcvq, frag);
        return true;
error:
        pr_warn("Failed to clone local mcast rcv buffer\n");
        kfree_skb(head);
        return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
                        struct sk_buff_head *cpy)
{
        struct sk_buff *skb, *_skb;

        skb_queue_walk(msg, skb) {
                _skb = pskb_copy(skb, GFP_ATOMIC);
                if (!_skb) {
                        __skb_queue_purge(cpy);
                        return false;
                }
                msg_set_destnode(buf_msg(_skb), dst);
                __skb_queue_tail(cpy, _skb);
        }
        return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
                             struct sk_buff *skb)
{
        struct sk_buff *_skb, *tmp;

        if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
                __skb_queue_head(list, skb);
                return true;
        }

        if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
                __skb_queue_tail(list, skb);
                return true;
        }

        skb_queue_walk_safe(list, _skb, tmp) {
                if (more(seqno, buf_seqno(_skb)))
                        continue;
                if (seqno == buf_seqno(_skb))
                        break;
                __skb_queue_before(list, _skb, skb);
                return true;
        }
        kfree_skb(skb);
        return false;
}
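
/* Note: less()/more() above compare 16-bit sequence numbers modulo 2^16, so
 * the ordering survives wraparound; e.g. seqno 2 sorts after seqno 65534.
 * A duplicate (seqno already present) falls through to kfree_skb() and the
 * function returns false.
 */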

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
                     struct sk_buff_head *xmitq)
{
        if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
                __skb_queue_tail(xmitq, skb);
}