2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * PACKET - implements raw packet sockets.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
13 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
35 * Ulises Alonso : Frame number limit removal and
36 * packet_set_ring memory leak.
37 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll
42 * Johann Baudy : Added TX RING.
43 * Chetan Loke : Implemented TPACKET_V3 block abstraction
45 * Copyright (C) 2011, <lokec@ccs.neu.edu>
48 * This program is free software; you can redistribute it and/or
49 * modify it under the terms of the GNU General Public License
50 * as published by the Free Software Foundation; either version
51 * 2 of the License, or (at your option) any later version.
55 #include <linux/types.h>
57 #include <linux/capability.h>
58 #include <linux/fcntl.h>
59 #include <linux/socket.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_packet.h>
64 #include <linux/wireless.h>
65 #include <linux/kernel.h>
66 #include <linux/kmod.h>
67 #include <linux/slab.h>
68 #include <linux/vmalloc.h>
69 #include <net/net_namespace.h>
71 #include <net/protocol.h>
72 #include <linux/skbuff.h>
74 #include <linux/errno.h>
75 #include <linux/timer.h>
76 #include <linux/uaccess.h>
77 #include <asm/ioctls.h>
79 #include <asm/cacheflush.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/poll.h>
84 #include <linux/module.h>
85 #include <linux/init.h>
86 #include <linux/mutex.h>
87 #include <linux/if_vlan.h>
88 #include <linux/virtio_net.h>
89 #include <linux/errqueue.h>
90 #include <linux/net_tstamp.h>
91 #include <linux/percpu.h>
93 #include <net/inet_common.h>
95 #include <linux/bpf.h>
96 #include <net/compat.h>
102 - if a device has no dev->hard_header routine, it adds and removes the ll
103    header itself. In this case the ll header is invisible outside the device,
104    but higher levels should still reserve dev->hard_header_len.
105    Some devices are clever enough to reallocate the skb when the header
106    does not fit into the reserved space (tunnels); others are silly
108 - packet sockets receive packets with the ll header already pulled,
109    so SOCK_RAW should push it back.
114 Incoming, dev->hard_header!=NULL
115 mac_header -> ll header
118 Outgoing, dev->hard_header!=NULL
119 mac_header -> ll header
122 Incoming, dev->hard_header==NULL
123 mac_header -> UNKNOWN position. It is very likely that it points to the ll
124 header. PPP does this, which is wrong because it introduces
125 asymmetry between the rx and tx paths.
128 Outgoing, dev->hard_header==NULL
129 mac_header -> data. ll header is still not built!
133 If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
139 dev->hard_header != NULL
140 mac_header -> ll header
143 dev->hard_header == NULL (ll header is added by device, we cannot control it)
147 We should set nh.raw on output to the correct position,
148 packet classifier depends on it.
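/*
 * Example (assuming a plain Ethernet device): an incoming frame reaches us
 * with the 14-byte link-layer header already pulled, i.e. skb_mac_header(skb)
 * points at the Ethernet header while skb->data points at the network header.
 * packet_rcv() and tpacket_rcv() below therefore restore it for SOCK_RAW with
 *
 *	skb_push(skb, skb->data - skb_mac_header(skb));
 *
 * and leave it pulled for SOCK_DGRAM.
 */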
151 /* Private packet socket structures. */
153 /* identical to struct packet_mreq except it has
154 * a longer address field.
156 struct packet_mreq_max {
158 unsigned short mr_type;
159 unsigned short mr_alen;
160 unsigned char mr_address[MAX_ADDR_LEN];
164 struct tpacket_hdr *h1;
165 struct tpacket2_hdr *h2;
166 struct tpacket3_hdr *h3;
170 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
171 int closing, int tx_ring);
173 #define V3_ALIGNMENT (8)
175 #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
177 #define BLK_PLUS_PRIV(sz_of_priv) \
178 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
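/*
 * Worked example: with tp_sizeof_priv = 13, ALIGN(13, V3_ALIGNMENT) = 16,
 * so BLK_PLUS_PRIV(13) = BLK_HDR_LEN + 16.  The first frame of every block
 * starts that many bytes past the block start; see prb_open_block(), which
 * sets nxt_offset = pkblk_start + BLK_PLUS_PRIV(blk_sizeof_priv).
 */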
180 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
181 #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
182 #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
183 #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
184 #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
185 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
186 #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
189 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
190 struct packet_type *pt, struct net_device *orig_dev);
192 static void *packet_previous_frame(struct packet_sock *po,
193 struct packet_ring_buffer *rb,
195 static void packet_increment_head(struct packet_ring_buffer *buff);
196 static int prb_curr_blk_in_use(struct tpacket_block_desc *);
197 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
198 struct packet_sock *);
199 static void prb_retire_current_block(struct tpacket_kbdq_core *,
200 struct packet_sock *, unsigned int status);
201 static int prb_queue_frozen(struct tpacket_kbdq_core *);
202 static void prb_open_block(struct tpacket_kbdq_core *,
203 struct tpacket_block_desc *);
204 static void prb_retire_rx_blk_timer_expired(struct timer_list *);
205 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
206 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
207 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
208 struct tpacket3_hdr *);
209 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
210 struct tpacket3_hdr *);
211 static void packet_flush_mclist(struct sock *sk);
212 static u16 packet_pick_tx_queue(struct sk_buff *skb);
214 struct packet_skb_cb {
216 struct sockaddr_pkt pkt;
218 /* Trick: alias skb original length with
219 * ll.sll_family and ll.protocol in order
222 unsigned int origlen;
223 struct sockaddr_ll ll;
228 #define vio_le() virtio_legacy_is_little_endian()
230 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
232 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
233 #define GET_PBLOCK_DESC(x, bid) \
234 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
235 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
236 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
237 #define GET_NEXT_PRB_BLK_NUM(x) \
238 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
239 ((x)->kactive_blk_num+1) : 0)
241 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
242 static void __fanout_link(struct sock *sk, struct packet_sock *po);
244 static int packet_direct_xmit(struct sk_buff *skb)
246 return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
249 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
251 struct net_device *dev;
254 dev = rcu_dereference(po->cached_dev);
262 static void packet_cached_dev_assign(struct packet_sock *po,
263 struct net_device *dev)
265 rcu_assign_pointer(po->cached_dev, dev);
268 static void packet_cached_dev_reset(struct packet_sock *po)
270 RCU_INIT_POINTER(po->cached_dev, NULL);
273 static bool packet_use_direct_xmit(const struct packet_sock *po)
275 return po->xmit == packet_direct_xmit;
278 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
279 struct net_device *sb_dev)
281 return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
284 static u16 packet_pick_tx_queue(struct sk_buff *skb)
286 struct net_device *dev = skb->dev;
287 const struct net_device_ops *ops = dev->netdev_ops;
290 if (ops->ndo_select_queue) {
291 queue_index = ops->ndo_select_queue(dev, skb, NULL,
292 __packet_pick_tx_queue);
293 queue_index = netdev_cap_txqueue(dev, queue_index);
295 queue_index = __packet_pick_tx_queue(dev, skb, NULL);
301 /* __register_prot_hook must be invoked through register_prot_hook
302 * or from a context in which asynchronous accesses to the packet
303 * socket are not possible (packet_create()).
305 static void __register_prot_hook(struct sock *sk)
307 struct packet_sock *po = pkt_sk(sk);
311 __fanout_link(sk, po);
313 dev_add_pack(&po->prot_hook);
320 static void register_prot_hook(struct sock *sk)
322 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
323 __register_prot_hook(sk);
326 /* If the sync parameter is true, we will temporarily drop
327 * the po->bind_lock and do a synchronize_net to make sure no
328 * asynchronous packet processing paths still refer to the elements
329 * of po->prot_hook. If the sync parameter is false, it is the
330 * caller's responsibility to take care of this.
332 static void __unregister_prot_hook(struct sock *sk, bool sync)
334 struct packet_sock *po = pkt_sk(sk);
336 lockdep_assert_held_once(&po->bind_lock);
341 __fanout_unlink(sk, po);
343 __dev_remove_pack(&po->prot_hook);
348 spin_unlock(&po->bind_lock);
350 spin_lock(&po->bind_lock);
354 static void unregister_prot_hook(struct sock *sk, bool sync)
356 struct packet_sock *po = pkt_sk(sk);
359 __unregister_prot_hook(sk, sync);
362 static inline struct page * __pure pgv_to_page(void *addr)
364 if (is_vmalloc_addr(addr))
365 return vmalloc_to_page(addr);
366 return virt_to_page(addr);
369 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
371 union tpacket_uhdr h;
374 switch (po->tp_version) {
376 h.h1->tp_status = status;
377 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
380 h.h2->tp_status = status;
381 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
384 h.h3->tp_status = status;
385 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
388 WARN(1, "TPACKET version not supported.\n");
395 static int __packet_get_status(struct packet_sock *po, void *frame)
397 union tpacket_uhdr h;
402 switch (po->tp_version) {
404 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
405 return h.h1->tp_status;
407 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
408 return h.h2->tp_status;
410 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
411 return h.h3->tp_status;
413 WARN(1, "TPACKET version not supported.\n");
419 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
422 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
425 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
426 ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
427 return TP_STATUS_TS_RAW_HARDWARE;
429 if (ktime_to_timespec_cond(skb->tstamp, ts))
430 return TP_STATUS_TS_SOFTWARE;
435 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
438 union tpacket_uhdr h;
442 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
446 switch (po->tp_version) {
448 h.h1->tp_sec = ts.tv_sec;
449 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
452 h.h2->tp_sec = ts.tv_sec;
453 h.h2->tp_nsec = ts.tv_nsec;
456 h.h3->tp_sec = ts.tv_sec;
457 h.h3->tp_nsec = ts.tv_nsec;
460 WARN(1, "TPACKET version not supported.\n");
464 /* one flush is safe, as both fields always lie on the same cacheline */
465 flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
471 static void *packet_lookup_frame(struct packet_sock *po,
472 struct packet_ring_buffer *rb,
473 unsigned int position,
476 unsigned int pg_vec_pos, frame_offset;
477 union tpacket_uhdr h;
479 pg_vec_pos = position / rb->frames_per_block;
480 frame_offset = position % rb->frames_per_block;
482 h.raw = rb->pg_vec[pg_vec_pos].buffer +
483 (frame_offset * rb->frame_size);
485 if (status != __packet_get_status(po, h.raw))
491 static void *packet_current_frame(struct packet_sock *po,
492 struct packet_ring_buffer *rb,
495 return packet_lookup_frame(po, rb, rb->head, status);
498 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
500 del_timer_sync(&pkc->retire_blk_timer);
503 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
504 struct sk_buff_head *rb_queue)
506 struct tpacket_kbdq_core *pkc;
508 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
510 spin_lock_bh(&rb_queue->lock);
511 pkc->delete_blk_timer = 1;
512 spin_unlock_bh(&rb_queue->lock);
514 prb_del_retire_blk_timer(pkc);
517 static void prb_setup_retire_blk_timer(struct packet_sock *po)
519 struct tpacket_kbdq_core *pkc;
521 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
522 timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
524 pkc->retire_blk_timer.expires = jiffies;
527 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
528 int blk_size_in_bytes)
530 struct net_device *dev;
531 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
532 struct ethtool_link_ksettings ecmd;
536 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
537 if (unlikely(!dev)) {
539 return DEFAULT_PRB_RETIRE_TOV;
541 err = __ethtool_get_link_ksettings(dev, &ecmd);
545 * If the link speed is so slow you don't really
546 * need to worry about perf anyways
548 if (ecmd.base.speed < SPEED_1000 ||
549 ecmd.base.speed == SPEED_UNKNOWN) {
550 return DEFAULT_PRB_RETIRE_TOV;
553 div = ecmd.base.speed / 1000;
556 return DEFAULT_PRB_RETIRE_TOV;
558 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
570 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
571 union tpacket_req_u *req_u)
573 p1->feature_req_word = req_u->req3.tp_feature_req_word;
576 static void init_prb_bdqc(struct packet_sock *po,
577 struct packet_ring_buffer *rb,
579 union tpacket_req_u *req_u)
581 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
582 struct tpacket_block_desc *pbd;
584 memset(p1, 0x0, sizeof(*p1));
586 p1->knxt_seq_num = 1;
588 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
589 p1->pkblk_start = pg_vec[0].buffer;
590 p1->kblk_size = req_u->req3.tp_block_size;
591 p1->knum_blocks = req_u->req3.tp_block_nr;
592 p1->hdrlen = po->tp_hdrlen;
593 p1->version = po->tp_version;
594 p1->last_kactive_blk_num = 0;
595 po->stats.stats3.tp_freeze_q_cnt = 0;
596 if (req_u->req3.tp_retire_blk_tov)
597 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
599 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
600 req_u->req3.tp_block_size);
601 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
602 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
604 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
605 prb_init_ft_ops(p1, req_u);
606 prb_setup_retire_blk_timer(po);
607 prb_open_block(p1, pbd);
610 /* Do NOT update the last_blk_num first.
611 * Assumes sk_buff_head lock is held.
613 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
615 mod_timer(&pkc->retire_blk_timer,
616 jiffies + pkc->tov_in_jiffies);
617 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
622 * 1) We refresh the timer only when we open a block.
623 * By doing this we don't waste cycles refreshing the timer
624 * on a packet-by-packet basis.
626 * With a 1MB block-size, on a 1Gbps line, it will take
627 * i) ~8 ms to fill a block + ii) memcpy etc.
628 * In this cut we are not accounting for the memcpy time.
630 * So, if the user sets the 'tmo' to 10ms then the timer
631 * will never fire while the block is still getting filled
632 * (which is what we want). However, the user could choose
633 * to close a block early and that's fine.
635 * But when the timer does fire, we check whether or not to refresh it.
636 * Since the tmo granularity is in msecs, it is not too expensive
637 * to refresh the timer, let's say every '8' msecs.
638 * Either the user can set the 'tmo' or we can derive it based on
639 * a) line-speed and b) block-size.
640 * prb_calc_retire_blk_tmo() calculates the tmo.
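/*
 * Rough arithmetic behind the "~8 ms" figure above: a 1 MB block holds
 * (1048576 * 8) / (1024 * 1024) = 8 Mbit of data, and on a SPEED_1000 link
 * div = 1000 / 1000 = 1, so filling the block takes about 8 / 1 = 8 ms.
 * See prb_calc_retire_blk_tmo() for the actual computation.
 */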
643 static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
645 struct packet_sock *po =
646 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
647 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
649 struct tpacket_block_desc *pbd;
651 spin_lock(&po->sk.sk_receive_queue.lock);
653 frozen = prb_queue_frozen(pkc);
654 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
656 if (unlikely(pkc->delete_blk_timer))
659 /* We only need to plug the race when the block is partially filled.
661 * lock(); increment BLOCK_NUM_PKTS; unlock()
662 * copy_bits() is in progress ...
663 * timer fires on other cpu:
664 * we can't retire the current block because copy_bits
668 if (BLOCK_NUM_PKTS(pbd)) {
669 while (atomic_read(&pkc->blk_fill_in_prog)) {
670 /* Waiting for skb_copy_bits to finish... */
675 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
677 if (!BLOCK_NUM_PKTS(pbd)) {
678 /* An empty block. Just refresh the timer. */
681 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
682 if (!prb_dispatch_next_block(pkc, po))
687 /* Case 1. Queue was frozen because user-space was
690 if (prb_curr_blk_in_use(pbd)) {
692 * Ok, user-space is still behind.
693 * So just refresh the timer.
697 /* Case 2. Queue was frozen, user-space caught up,
698 * now the link went idle && the timer fired.
699 * We don't have a block to close. So we open this
700 * block and restart the timer.
701 * Opening a block thaws the queue and restarts the timer;
702 * thawing/timer-refresh is a side effect.
704 prb_open_block(pkc, pbd);
711 _prb_refresh_rx_retire_blk_timer(pkc);
714 spin_unlock(&po->sk.sk_receive_queue.lock);
717 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
718 struct tpacket_block_desc *pbd1, __u32 status)
720 /* Flush everything minus the block header */
722 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
727 /* Skip the block header (we know the header WILL fit in 4K)
730 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
731 for (; start < end; start += PAGE_SIZE)
732 flush_dcache_page(pgv_to_page(start));
737 /* Now update the block status. */
739 BLOCK_STATUS(pbd1) = status;
741 /* Flush the block header */
743 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
745 flush_dcache_page(pgv_to_page(start));
755 * 2) Increment active_blk_num
757 * Note: we deliberately do NOT refresh the timer here,
758 * because almost always the next block will be opened.
760 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
761 struct tpacket_block_desc *pbd1,
762 struct packet_sock *po, unsigned int stat)
764 __u32 status = TP_STATUS_USER | stat;
766 struct tpacket3_hdr *last_pkt;
767 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
768 struct sock *sk = &po->sk;
770 if (po->stats.stats3.tp_drops)
771 status |= TP_STATUS_LOSING;
773 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
774 last_pkt->tp_next_offset = 0;
776 /* Get the ts of the last pkt */
777 if (BLOCK_NUM_PKTS(pbd1)) {
778 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
779 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
781 /* Ok, we tmo'd - so get the current time.
783 * It shouldn't really happen as we don't close empty
784 * blocks. See prb_retire_rx_blk_timer_expired().
788 h1->ts_last_pkt.ts_sec = ts.tv_sec;
789 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
794 /* Flush the block */
795 prb_flush_block(pkc1, pbd1, status);
797 sk->sk_data_ready(sk);
799 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
802 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
804 pkc->reset_pending_on_curr_blk = 0;
808 * Side effect of opening a block:
810 * 1) prb_queue is thawed.
811 * 2) retire_blk_timer is refreshed.
814 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
815 struct tpacket_block_desc *pbd1)
818 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
822 /* We could have just memset this but we will lose the
823 * flexibility of making the priv area sticky
826 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
827 BLOCK_NUM_PKTS(pbd1) = 0;
828 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
832 h1->ts_first_pkt.ts_sec = ts.tv_sec;
833 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
835 pkc1->pkblk_start = (char *)pbd1;
836 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
838 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
839 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
841 pbd1->version = pkc1->version;
842 pkc1->prev = pkc1->nxt_offset;
843 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
845 prb_thaw_queue(pkc1);
846 _prb_refresh_rx_retire_blk_timer(pkc1);
852 * Queue freeze logic:
853 * 1) Assume tp_block_nr = 8 blocks.
854 * 2) At time 't0', user opens Rx ring.
855 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
856 * 4) user-space is either sleeping or processing block '0'.
857 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
858 * it will close block-7, loop around and try to fill block '0'.
860 * __packet_lookup_frame_in_block
861 * prb_retire_current_block()
862 * prb_dispatch_next_block()
863 * |->(BLOCK_STATUS == USER) evaluates to true
864 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
865 * 6) Now there are two cases:
866 * 6.1) Link goes idle right after the queue is frozen.
867 * But remember, the last open_block() refreshed the timer.
868 * When this timer expires, it will refresh itself so that we can
869 * re-open block-0 in near future.
870 * 6.2) Link is busy and keeps on receiving packets. This is a simple
871 * case and __packet_lookup_frame_in_block will check if block-0
872 * is free and can now be re-used.
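/*
 * The user-space side of thawing (sketch): the reader hands the block back
 * once it is done with it, roughly
 *
 *	struct tpacket_block_desc *pbd = ...;	// block 0 of the mmap()ed ring
 *	// ... walk pbd->hdr.bh1.num_pkts packets via tp_next_offset ...
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// hand back to kernel
 *
 * Once block-0 is no longer TP_STATUS_USER, __packet_lookup_frame_in_block()
 * sees it free, re-opens it via prb_open_block() and the queue is thawed.
 */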
874 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
875 struct packet_sock *po)
877 pkc->reset_pending_on_curr_blk = 1;
878 po->stats.stats3.tp_freeze_q_cnt++;
881 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
884 * If the next block is free then we will dispatch it
885 * and return a good offset.
886 * Else, we will freeze the queue.
887 * So, caller must check the return value.
889 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
890 struct packet_sock *po)
892 struct tpacket_block_desc *pbd;
896 /* 1. Get current block num */
897 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
899 /* 2. If this block is currently in_use then freeze the queue */
900 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
901 prb_freeze_queue(pkc, po);
907 * open this block and return the offset where the first packet
908 * needs to get stored.
910 prb_open_block(pkc, pbd);
911 return (void *)pkc->nxt_offset;
914 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
915 struct packet_sock *po, unsigned int status)
917 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
919 /* retire/close the current block */
920 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
922 * Plug the case where copy_bits() is in progress on
923 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
924 * have space to copy the pkt in the current block and
925 * called prb_retire_current_block()
927 * We don't need to worry about the TMO case because
928 * the timer-handler already handled this case.
930 if (!(status & TP_STATUS_BLK_TMO)) {
931 while (atomic_read(&pkc->blk_fill_in_prog)) {
932 /* Waiting for skb_copy_bits to finish... */
936 prb_close_block(pkc, pbd, po, status);
941 static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
943 return TP_STATUS_USER & BLOCK_STATUS(pbd);
946 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
948 return pkc->reset_pending_on_curr_blk;
951 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
952 __releases(&pkc->blk_fill_in_prog_lock)
954 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
955 atomic_dec(&pkc->blk_fill_in_prog);
958 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
959 struct tpacket3_hdr *ppd)
961 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
964 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
965 struct tpacket3_hdr *ppd)
967 ppd->hv1.tp_rxhash = 0;
970 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
971 struct tpacket3_hdr *ppd)
973 if (skb_vlan_tag_present(pkc->skb)) {
974 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
975 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
976 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
978 ppd->hv1.tp_vlan_tci = 0;
979 ppd->hv1.tp_vlan_tpid = 0;
980 ppd->tp_status = TP_STATUS_AVAILABLE;
984 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
985 struct tpacket3_hdr *ppd)
987 ppd->hv1.tp_padding = 0;
988 prb_fill_vlan_info(pkc, ppd);
990 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
991 prb_fill_rxhash(pkc, ppd);
993 prb_clear_rxhash(pkc, ppd);
996 static void prb_fill_curr_block(char *curr,
997 struct tpacket_kbdq_core *pkc,
998 struct tpacket_block_desc *pbd,
1000 __acquires(&pkc->blk_fill_in_prog_lock)
1002 struct tpacket3_hdr *ppd;
1004 ppd = (struct tpacket3_hdr *)curr;
1005 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1007 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1008 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1009 BLOCK_NUM_PKTS(pbd) += 1;
1010 atomic_inc(&pkc->blk_fill_in_prog);
1011 prb_run_all_ft_ops(pkc, ppd);
1014 /* Assumes caller has the sk->rx_queue.lock */
1015 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1016 struct sk_buff *skb,
1021 struct tpacket_kbdq_core *pkc;
1022 struct tpacket_block_desc *pbd;
1025 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1026 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1028 /* Queue is frozen when user space is lagging behind */
1029 if (prb_queue_frozen(pkc)) {
1031 * Check if that last block which caused the queue to freeze
1032 * is still in_use by user-space.
1034 if (prb_curr_blk_in_use(pbd)) {
1035 /* Can't record this packet */
1039 * Ok, the block was released by user-space.
1040 * Now let's open that block.
1041 * Opening a block also thaws the queue;
1042 * thawing is a side effect.
1044 prb_open_block(pkc, pbd);
1049 curr = pkc->nxt_offset;
1051 end = (char *)pbd + pkc->kblk_size;
1053 /* first try the current block */
1054 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1055 prb_fill_curr_block(curr, pkc, pbd, len);
1056 return (void *)curr;
1059 /* Ok, close the current block */
1060 prb_retire_current_block(pkc, po, 0);
1062 /* Now, try to dispatch the next block */
1063 curr = (char *)prb_dispatch_next_block(pkc, po);
1065 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1066 prb_fill_curr_block(curr, pkc, pbd, len);
1067 return (void *)curr;
1071 * No free blocks are available. user_space hasn't caught up yet.
1072 * Queue was just frozen and now this packet will get dropped.
1077 static void *packet_current_rx_frame(struct packet_sock *po,
1078 struct sk_buff *skb,
1079 int status, unsigned int len)
1082 switch (po->tp_version) {
1085 curr = packet_lookup_frame(po, &po->rx_ring,
1086 po->rx_ring.head, status);
1089 return __packet_lookup_frame_in_block(po, skb, status, len);
1091 WARN(1, "TPACKET version not supported\n");
1097 static void *prb_lookup_block(struct packet_sock *po,
1098 struct packet_ring_buffer *rb,
1102 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1103 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1105 if (status != BLOCK_STATUS(pbd))
1110 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1113 if (rb->prb_bdqc.kactive_blk_num)
1114 prev = rb->prb_bdqc.kactive_blk_num-1;
1116 prev = rb->prb_bdqc.knum_blocks-1;
1120 /* Assumes caller has held the rx_queue.lock */
1121 static void *__prb_previous_block(struct packet_sock *po,
1122 struct packet_ring_buffer *rb,
1125 unsigned int previous = prb_previous_blk_num(rb);
1126 return prb_lookup_block(po, rb, previous, status);
1129 static void *packet_previous_rx_frame(struct packet_sock *po,
1130 struct packet_ring_buffer *rb,
1133 if (po->tp_version <= TPACKET_V2)
1134 return packet_previous_frame(po, rb, status);
1136 return __prb_previous_block(po, rb, status);
1139 static void packet_increment_rx_head(struct packet_sock *po,
1140 struct packet_ring_buffer *rb)
1142 switch (po->tp_version) {
1145 return packet_increment_head(rb);
1148 WARN(1, "TPACKET version not supported.\n");
1154 static void *packet_previous_frame(struct packet_sock *po,
1155 struct packet_ring_buffer *rb,
1158 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1159 return packet_lookup_frame(po, rb, previous, status);
1162 static void packet_increment_head(struct packet_ring_buffer *buff)
1164 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1167 static void packet_inc_pending(struct packet_ring_buffer *rb)
1169 this_cpu_inc(*rb->pending_refcnt);
1172 static void packet_dec_pending(struct packet_ring_buffer *rb)
1174 this_cpu_dec(*rb->pending_refcnt);
1177 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1179 unsigned int refcnt = 0;
1182 /* We don't use pending refcount in rx_ring. */
1183 if (rb->pending_refcnt == NULL)
1186 for_each_possible_cpu(cpu)
1187 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1192 static int packet_alloc_pending(struct packet_sock *po)
1194 po->rx_ring.pending_refcnt = NULL;
1196 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1197 if (unlikely(po->tx_ring.pending_refcnt == NULL))
1203 static void packet_free_pending(struct packet_sock *po)
1205 free_percpu(po->tx_ring.pending_refcnt);
1208 #define ROOM_POW_OFF 2
1209 #define ROOM_NONE 0x0
1210 #define ROOM_LOW 0x1
1211 #define ROOM_NORMAL 0x2
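/*
 * Meaning of the ROOM_* levels (see __packet_rcv_has_room() below): with
 * ROOM_POW_OFF = 2, ROOM_NORMAL roughly means at least a quarter of the
 * receive budget is still free (sk_rcvbuf for regular sockets, ring
 * frames/blocks for tpacket sockets), ROOM_LOW means there is still room for
 * this packet but less than that quarter, and ROOM_NONE means it cannot be
 * stored at all.
 */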
1213 static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
1217 len = po->rx_ring.frame_max + 1;
1218 idx = po->rx_ring.head;
1220 idx += len >> pow_off;
1223 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1226 static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
1230 len = po->rx_ring.prb_bdqc.knum_blocks;
1231 idx = po->rx_ring.prb_bdqc.kactive_blk_num;
1233 idx += len >> pow_off;
1236 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1239 static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1241 struct sock *sk = &po->sk;
1242 int ret = ROOM_NONE;
1244 if (po->prot_hook.func != tpacket_rcv) {
1245 int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1246 - (skb ? skb->truesize : 0);
1247 if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
1255 if (po->tp_version == TPACKET_V3) {
1256 if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1258 else if (__tpacket_v3_has_room(po, 0))
1261 if (__tpacket_has_room(po, ROOM_POW_OFF))
1263 else if (__tpacket_has_room(po, 0))
1270 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1275 spin_lock_bh(&po->sk.sk_receive_queue.lock);
1276 ret = __packet_rcv_has_room(po, skb);
1277 has_room = ret == ROOM_NORMAL;
1278 if (po->pressure == has_room)
1279 po->pressure = !has_room;
1280 spin_unlock_bh(&po->sk.sk_receive_queue.lock);
1285 static void packet_sock_destruct(struct sock *sk)
1287 skb_queue_purge(&sk->sk_error_queue);
1289 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1290 WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1292 if (!sock_flag(sk, SOCK_DEAD)) {
1293 pr_err("Attempt to release alive packet socket: %p\n", sk);
1297 sk_refcnt_debug_dec(sk);
1300 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1302 u32 *history = po->rollover->history;
1306 rxhash = skb_get_hash(skb);
1307 for (i = 0; i < ROLLOVER_HLEN; i++)
1308 if (READ_ONCE(history[i]) == rxhash)
1311 victim = prandom_u32() % ROLLOVER_HLEN;
1313 /* Avoid dirtying the cache line if possible */
1314 if (READ_ONCE(history[victim]) != rxhash)
1315 WRITE_ONCE(history[victim], rxhash);
1317 return count > (ROLLOVER_HLEN >> 1);
1320 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1321 struct sk_buff *skb,
1324 return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1327 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1328 struct sk_buff *skb,
1331 unsigned int val = atomic_inc_return(&f->rr_cur);
1336 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1337 struct sk_buff *skb,
1340 return smp_processor_id() % num;
1343 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1344 struct sk_buff *skb,
1347 return prandom_u32_max(num);
1350 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1351 struct sk_buff *skb,
1352 unsigned int idx, bool try_self,
1355 struct packet_sock *po, *po_next, *po_skip = NULL;
1356 unsigned int i, j, room = ROOM_NONE;
1358 po = pkt_sk(f->arr[idx]);
1361 room = packet_rcv_has_room(po, skb);
1362 if (room == ROOM_NORMAL ||
1363 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1368 i = j = min_t(int, po->rollover->sock, num - 1);
1370 po_next = pkt_sk(f->arr[i]);
1371 if (po_next != po_skip && !po_next->pressure &&
1372 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1374 po->rollover->sock = i;
1375 atomic_long_inc(&po->rollover->num);
1376 if (room == ROOM_LOW)
1377 atomic_long_inc(&po->rollover->num_huge);
1385 atomic_long_inc(&po->rollover->num_failed);
1389 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1390 struct sk_buff *skb,
1393 return skb_get_queue_mapping(skb) % num;
1396 static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1397 struct sk_buff *skb,
1400 struct bpf_prog *prog;
1401 unsigned int ret = 0;
1404 prog = rcu_dereference(f->bpf_prog);
1406 ret = bpf_prog_run_clear_cb(prog, skb) % num;
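/*
 * The PACKET_FANOUT option word carries the fanout mode in its low byte and
 * the PACKET_FANOUT_FLAG_* bits in its high byte; fanout_add() stores the
 * latter shifted down (flags = type_flags >> 8), so fanout_has_flag() shifts
 * the UAPI flag the same way before testing f->flags.  For example,
 * PACKET_FANOUT_FLAG_DEFRAG (0x8000) is tested as bit 0x80 here.
 */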
1412 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1414 return f->flags & (flag >> 8);
1417 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1418 struct packet_type *pt, struct net_device *orig_dev)
1420 struct packet_fanout *f = pt->af_packet_priv;
1421 unsigned int num = READ_ONCE(f->num_members);
1422 struct net *net = read_pnet(&f->net);
1423 struct packet_sock *po;
1426 if (!net_eq(dev_net(dev), net) || !num) {
1431 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1432 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1437 case PACKET_FANOUT_HASH:
1439 idx = fanout_demux_hash(f, skb, num);
1441 case PACKET_FANOUT_LB:
1442 idx = fanout_demux_lb(f, skb, num);
1444 case PACKET_FANOUT_CPU:
1445 idx = fanout_demux_cpu(f, skb, num);
1447 case PACKET_FANOUT_RND:
1448 idx = fanout_demux_rnd(f, skb, num);
1450 case PACKET_FANOUT_QM:
1451 idx = fanout_demux_qm(f, skb, num);
1453 case PACKET_FANOUT_ROLLOVER:
1454 idx = fanout_demux_rollover(f, skb, 0, false, num);
1456 case PACKET_FANOUT_CBPF:
1457 case PACKET_FANOUT_EBPF:
1458 idx = fanout_demux_bpf(f, skb, num);
1462 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1463 idx = fanout_demux_rollover(f, skb, idx, true, num);
1465 po = pkt_sk(f->arr[idx]);
1466 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1469 DEFINE_MUTEX(fanout_mutex);
1470 EXPORT_SYMBOL_GPL(fanout_mutex);
1471 static LIST_HEAD(fanout_list);
1472 static u16 fanout_next_id;
1474 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1476 struct packet_fanout *f = po->fanout;
1478 spin_lock(&f->lock);
1479 f->arr[f->num_members] = sk;
1482 if (f->num_members == 1)
1483 dev_add_pack(&f->prot_hook);
1484 spin_unlock(&f->lock);
1487 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1489 struct packet_fanout *f = po->fanout;
1492 spin_lock(&f->lock);
1493 for (i = 0; i < f->num_members; i++) {
1494 if (f->arr[i] == sk)
1497 BUG_ON(i >= f->num_members);
1498 f->arr[i] = f->arr[f->num_members - 1];
1500 if (f->num_members == 0)
1501 __dev_remove_pack(&f->prot_hook);
1502 spin_unlock(&f->lock);
1505 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1507 if (sk->sk_family != PF_PACKET)
1510 return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1513 static void fanout_init_data(struct packet_fanout *f)
1516 case PACKET_FANOUT_LB:
1517 atomic_set(&f->rr_cur, 0);
1519 case PACKET_FANOUT_CBPF:
1520 case PACKET_FANOUT_EBPF:
1521 RCU_INIT_POINTER(f->bpf_prog, NULL);
1526 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1528 struct bpf_prog *old;
1530 spin_lock(&f->lock);
1531 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1532 rcu_assign_pointer(f->bpf_prog, new);
1533 spin_unlock(&f->lock);
1537 bpf_prog_destroy(old);
1541 static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1544 struct bpf_prog *new;
1545 struct sock_fprog fprog;
1548 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1550 if (len != sizeof(fprog))
1552 if (copy_from_user(&fprog, data, len))
1555 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1559 __fanout_set_data_bpf(po->fanout, new);
1563 static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1566 struct bpf_prog *new;
1569 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1571 if (len != sizeof(fd))
1573 if (copy_from_user(&fd, data, len))
1576 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1578 return PTR_ERR(new);
1580 __fanout_set_data_bpf(po->fanout, new);
1584 static int fanout_set_data(struct packet_sock *po, char __user *data,
1587 switch (po->fanout->type) {
1588 case PACKET_FANOUT_CBPF:
1589 return fanout_set_data_cbpf(po, data, len);
1590 case PACKET_FANOUT_EBPF:
1591 return fanout_set_data_ebpf(po, data, len);
1597 static void fanout_release_data(struct packet_fanout *f)
1600 case PACKET_FANOUT_CBPF:
1601 case PACKET_FANOUT_EBPF:
1602 __fanout_set_data_bpf(f, NULL);
1606 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1608 struct packet_fanout *f;
1610 list_for_each_entry(f, &fanout_list, list) {
1611 if (f->id == candidate_id &&
1612 read_pnet(&f->net) == sock_net(sk)) {
1619 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1621 u16 id = fanout_next_id;
1624 if (__fanout_id_is_free(sk, id)) {
1626 fanout_next_id = id + 1;
1631 } while (id != fanout_next_id);
1636 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1638 struct packet_rollover *rollover = NULL;
1639 struct packet_sock *po = pkt_sk(sk);
1640 struct packet_fanout *f, *match;
1641 u8 type = type_flags & 0xff;
1642 u8 flags = type_flags >> 8;
1646 case PACKET_FANOUT_ROLLOVER:
1647 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1649 case PACKET_FANOUT_HASH:
1650 case PACKET_FANOUT_LB:
1651 case PACKET_FANOUT_CPU:
1652 case PACKET_FANOUT_RND:
1653 case PACKET_FANOUT_QM:
1654 case PACKET_FANOUT_CBPF:
1655 case PACKET_FANOUT_EBPF:
1661 mutex_lock(&fanout_mutex);
1667 if (type == PACKET_FANOUT_ROLLOVER ||
1668 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1670 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1673 atomic_long_set(&rollover->num, 0);
1674 atomic_long_set(&rollover->num_huge, 0);
1675 atomic_long_set(&rollover->num_failed, 0);
1678 if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1683 if (!fanout_find_new_id(sk, &id)) {
1687 /* ephemeral flag for the first socket in the group: drop it */
1688 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1692 list_for_each_entry(f, &fanout_list, list) {
1694 read_pnet(&f->net) == sock_net(sk)) {
1700 if (match && match->flags != flags)
1704 match = kzalloc(sizeof(*match), GFP_KERNEL);
1707 write_pnet(&match->net, sock_net(sk));
1710 match->flags = flags;
1711 INIT_LIST_HEAD(&match->list);
1712 spin_lock_init(&match->lock);
1713 refcount_set(&match->sk_ref, 0);
1714 fanout_init_data(match);
1715 match->prot_hook.type = po->prot_hook.type;
1716 match->prot_hook.dev = po->prot_hook.dev;
1717 match->prot_hook.func = packet_rcv_fanout;
1718 match->prot_hook.af_packet_priv = match;
1719 match->prot_hook.af_packet_net = read_pnet(&match->net);
1720 match->prot_hook.id_match = match_fanout_group;
1721 list_add(&match->list, &fanout_list);
1725 spin_lock(&po->bind_lock);
1727 match->type == type &&
1728 match->prot_hook.type == po->prot_hook.type &&
1729 match->prot_hook.dev == po->prot_hook.dev) {
1731 if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1732 __dev_remove_pack(&po->prot_hook);
1734 /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1735 WRITE_ONCE(po->fanout, match);
1737 po->rollover = rollover;
1739 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1740 __fanout_link(sk, po);
1744 spin_unlock(&po->bind_lock);
1746 if (err && !refcount_read(&match->sk_ref)) {
1747 list_del(&match->list);
1753 mutex_unlock(&fanout_mutex);
1757 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1758 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1759 * It is the responsibility of the caller to call fanout_release_data() and
1760 * free the returned packet_fanout (after synchronize_net())
1762 static struct packet_fanout *fanout_release(struct sock *sk)
1764 struct packet_sock *po = pkt_sk(sk);
1765 struct packet_fanout *f;
1767 mutex_lock(&fanout_mutex);
1772 if (refcount_dec_and_test(&f->sk_ref))
1777 mutex_unlock(&fanout_mutex);
1782 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1783 struct sk_buff *skb)
1785 /* Earlier code assumed this would be a VLAN pkt, double-check
1786 * this now that we have the actual packet in hand. We can only
1787 * do this check on Ethernet devices.
1789 if (unlikely(dev->type != ARPHRD_ETHER))
1792 skb_reset_mac_header(skb);
1793 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1796 static const struct proto_ops packet_ops;
1798 static const struct proto_ops packet_ops_spkt;
1800 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1801 struct packet_type *pt, struct net_device *orig_dev)
1804 struct sockaddr_pkt *spkt;
1807 * When we registered the protocol we saved the socket in the data
1808 * field for just this event.
1811 sk = pt->af_packet_priv;
1814 * Yank back the headers [hope the device set this
1815 * right or kerboom...]
1817 * Incoming packets have ll header pulled,
1820 * For outgoing ones skb->data == skb_mac_header(skb)
1821 * so that this procedure is a no-op.
1824 if (skb->pkt_type == PACKET_LOOPBACK)
1827 if (!net_eq(dev_net(dev), sock_net(sk)))
1830 skb = skb_share_check(skb, GFP_ATOMIC);
1834 /* drop any routing info */
1837 /* drop conntrack reference */
1840 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1842 skb_push(skb, skb->data - skb_mac_header(skb));
1845 * The SOCK_PACKET socket receives _all_ frames.
1848 spkt->spkt_family = dev->type;
1849 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1850 spkt->spkt_protocol = skb->protocol;
1853 * Charge the memory to the socket. This is done specifically
1854 * to prevent sockets using all the memory up.
1857 if (sock_queue_rcv_skb(sk, skb) == 0)
1868 * Output a raw packet to a device layer. This bypasses all the other
1869 * protocol layers and you must therefore supply it with a complete frame
1872 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1875 struct sock *sk = sock->sk;
1876 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1877 struct sk_buff *skb = NULL;
1878 struct net_device *dev;
1879 struct sockcm_cookie sockc;
1885 * Get and verify the address.
1889 if (msg->msg_namelen < sizeof(struct sockaddr))
1891 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1892 proto = saddr->spkt_protocol;
1894 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
1897 * Find the device first to size check it
1900 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1903 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1909 if (!(dev->flags & IFF_UP))
1913 * You may not queue a frame bigger than the mtu. This is the lowest level
1914 * raw protocol and you must do your own fragmentation at this level.
1917 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1918 if (!netif_supports_nofcs(dev)) {
1919 err = -EPROTONOSUPPORT;
1922 extra_len = 4; /* We're doing our own CRC */
1926 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1930 size_t reserved = LL_RESERVED_SPACE(dev);
1931 int tlen = dev->needed_tailroom;
1932 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1935 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1938 /* FIXME: Save some space for broken drivers that write a hard
1939 * header at transmission time by themselves. PPP is the notable
1940 * one here. This should really be fixed at the driver level.
1942 skb_reserve(skb, reserved);
1943 skb_reset_network_header(skb);
1945 /* Try to align data part correctly */
1950 skb_reset_network_header(skb);
1952 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1958 if (!dev_validate_header(dev, skb->data, len)) {
1962 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1963 !packet_extra_vlan_len_allowed(dev, skb)) {
1968 sockcm_init(&sockc, sk);
1969 if (msg->msg_controllen) {
1970 err = sock_cmsg_send(sk, msg, &sockc);
1975 skb->protocol = proto;
1977 skb->priority = sk->sk_priority;
1978 skb->mark = sk->sk_mark;
1979 skb->tstamp = sockc.transmit_time;
1981 skb_setup_tx_timestamp(skb, sockc.tsflags);
1983 if (unlikely(extra_len == 4))
1986 skb_probe_transport_header(skb, 0);
1988 dev_queue_xmit(skb);
1999 static unsigned int run_filter(struct sk_buff *skb,
2000 const struct sock *sk,
2003 struct sk_filter *filter;
2006 filter = rcu_dereference(sk->sk_filter);
2008 res = bpf_prog_run_clear_cb(filter->prog, skb);
2014 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2017 struct virtio_net_hdr vnet_hdr;
2019 if (*len < sizeof(vnet_hdr))
2021 *len -= sizeof(vnet_hdr);
2023 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2026 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2030 * This function does lazy skb cloning in the hope that most packets
2031 * are discarded by BPF.
2033 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2034 * and skb->cb are mangled. It works because (and until) packets
2035 * falling here are owned by current CPU. Output packets are cloned
2036 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2037 * sequentially, so that if we return the skb to its original state on exit,
2038 * we will not harm anyone.
2041 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2042 struct packet_type *pt, struct net_device *orig_dev)
2045 struct sockaddr_ll *sll;
2046 struct packet_sock *po;
2047 u8 *skb_head = skb->data;
2048 int skb_len = skb->len;
2049 unsigned int snaplen, res;
2050 bool is_drop_n_account = false;
2052 if (skb->pkt_type == PACKET_LOOPBACK)
2055 sk = pt->af_packet_priv;
2058 if (!net_eq(dev_net(dev), sock_net(sk)))
2063 if (dev->header_ops) {
2064 /* The device has an explicit notion of ll header,
2065 * exported to higher levels.
2067 * Otherwise, the device hides details of its frame
2068 * structure, so that the corresponding packet head is
2069 * never delivered to the user.
2071 if (sk->sk_type != SOCK_DGRAM)
2072 skb_push(skb, skb->data - skb_mac_header(skb));
2073 else if (skb->pkt_type == PACKET_OUTGOING) {
2074 /* Special case: outgoing packets have ll header at head */
2075 skb_pull(skb, skb_network_offset(skb));
2081 res = run_filter(skb, sk, snaplen);
2083 goto drop_n_restore;
2087 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2090 if (skb_shared(skb)) {
2091 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2095 if (skb_head != skb->data) {
2096 skb->data = skb_head;
2103 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2105 sll = &PACKET_SKB_CB(skb)->sa.ll;
2106 sll->sll_hatype = dev->type;
2107 sll->sll_pkttype = skb->pkt_type;
2108 if (unlikely(po->origdev))
2109 sll->sll_ifindex = orig_dev->ifindex;
2111 sll->sll_ifindex = dev->ifindex;
2113 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2115 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2116 * Use their space for storing the original skb length.
2118 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2120 if (pskb_trim(skb, snaplen))
2123 skb_set_owner_r(skb, sk);
2127 /* drop conntrack reference */
2130 spin_lock(&sk->sk_receive_queue.lock);
2131 po->stats.stats1.tp_packets++;
2132 sock_skb_set_dropcount(sk, skb);
2133 __skb_queue_tail(&sk->sk_receive_queue, skb);
2134 spin_unlock(&sk->sk_receive_queue.lock);
2135 sk->sk_data_ready(sk);
2139 is_drop_n_account = true;
2140 spin_lock(&sk->sk_receive_queue.lock);
2141 po->stats.stats1.tp_drops++;
2142 atomic_inc(&sk->sk_drops);
2143 spin_unlock(&sk->sk_receive_queue.lock);
2146 if (skb_head != skb->data && skb_shared(skb)) {
2147 skb->data = skb_head;
2151 if (!is_drop_n_account)
2158 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2159 struct packet_type *pt, struct net_device *orig_dev)
2162 struct packet_sock *po;
2163 struct sockaddr_ll *sll;
2164 union tpacket_uhdr h;
2165 u8 *skb_head = skb->data;
2166 int skb_len = skb->len;
2167 unsigned int snaplen, res;
2168 unsigned long status = TP_STATUS_USER;
2169 unsigned short macoff, hdrlen;
2170 unsigned int netoff;
2171 struct sk_buff *copy_skb = NULL;
2174 bool is_drop_n_account = false;
2175 unsigned int slot_id = 0;
2176 bool do_vnet = false;
2178 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2179 * We may add members to them up to the current aligned size without forcing
2180 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2182 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2183 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2185 if (skb->pkt_type == PACKET_LOOPBACK)
2188 sk = pt->af_packet_priv;
2191 if (!net_eq(dev_net(dev), sock_net(sk)))
2194 if (dev->header_ops) {
2195 if (sk->sk_type != SOCK_DGRAM)
2196 skb_push(skb, skb->data - skb_mac_header(skb));
2197 else if (skb->pkt_type == PACKET_OUTGOING) {
2198 /* Special case: outgoing packets have ll header at head */
2199 skb_pull(skb, skb_network_offset(skb));
2205 res = run_filter(skb, sk, snaplen);
2207 goto drop_n_restore;
2209 if (skb->ip_summed == CHECKSUM_PARTIAL)
2210 status |= TP_STATUS_CSUMNOTREADY;
2211 else if (skb->pkt_type != PACKET_OUTGOING &&
2212 (skb->ip_summed == CHECKSUM_COMPLETE ||
2213 skb_csum_unnecessary(skb)))
2214 status |= TP_STATUS_CSUM_VALID;
2219 if (sk->sk_type == SOCK_DGRAM) {
2220 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2223 unsigned int maclen = skb_network_offset(skb);
2224 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2225 (maclen < 16 ? 16 : maclen)) +
2227 if (po->has_vnet_hdr) {
2228 netoff += sizeof(struct virtio_net_hdr);
2231 macoff = netoff - maclen;
2233 if (netoff > USHRT_MAX) {
2234 spin_lock(&sk->sk_receive_queue.lock);
2235 po->stats.stats1.tp_drops++;
2236 spin_unlock(&sk->sk_receive_queue.lock);
2237 goto drop_n_restore;
2239 if (po->tp_version <= TPACKET_V2) {
2240 if (macoff + snaplen > po->rx_ring.frame_size) {
2241 if (po->copy_thresh &&
2242 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2243 if (skb_shared(skb)) {
2244 copy_skb = skb_clone(skb, GFP_ATOMIC);
2246 copy_skb = skb_get(skb);
2247 skb_head = skb->data;
2250 memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
2251 sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
2252 skb_set_owner_r(copy_skb, sk);
2255 snaplen = po->rx_ring.frame_size - macoff;
2256 if ((int)snaplen < 0) {
2261 } else if (unlikely(macoff + snaplen >
2262 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2265 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2266 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2267 snaplen, nval, macoff);
2269 if (unlikely((int)snaplen < 0)) {
2271 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2275 spin_lock(&sk->sk_receive_queue.lock);
2276 h.raw = packet_current_rx_frame(po, skb,
2277 TP_STATUS_KERNEL, (macoff+snaplen));
2279 goto drop_n_account;
2281 if (po->tp_version <= TPACKET_V2) {
2282 slot_id = po->rx_ring.head;
2283 if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2284 goto drop_n_account;
2285 __set_bit(slot_id, po->rx_ring.rx_owner_map);
2289 virtio_net_hdr_from_skb(skb, h.raw + macoff -
2290 sizeof(struct virtio_net_hdr),
2291 vio_le(), true, 0)) {
2292 if (po->tp_version == TPACKET_V3)
2293 prb_clear_blk_fill_status(&po->rx_ring);
2294 goto drop_n_account;
2297 if (po->tp_version <= TPACKET_V2) {
2298 packet_increment_rx_head(po, &po->rx_ring);
2300 * LOSING will be reported till you read the stats,
2301 * because it's COR - Clear On Read.
2302 * Anyways, moving it for V1/V2 only as V3 doesn't need this
2305 if (po->stats.stats1.tp_drops)
2306 status |= TP_STATUS_LOSING;
2309 po->stats.stats1.tp_packets++;
2311 status |= TP_STATUS_COPY;
2312 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2314 spin_unlock(&sk->sk_receive_queue.lock);
2316 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2318 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2319 getnstimeofday(&ts);
2321 status |= ts_status;
2323 switch (po->tp_version) {
2325 h.h1->tp_len = skb->len;
2326 h.h1->tp_snaplen = snaplen;
2327 h.h1->tp_mac = macoff;
2328 h.h1->tp_net = netoff;
2329 h.h1->tp_sec = ts.tv_sec;
2330 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2331 hdrlen = sizeof(*h.h1);
2334 h.h2->tp_len = skb->len;
2335 h.h2->tp_snaplen = snaplen;
2336 h.h2->tp_mac = macoff;
2337 h.h2->tp_net = netoff;
2338 h.h2->tp_sec = ts.tv_sec;
2339 h.h2->tp_nsec = ts.tv_nsec;
2340 if (skb_vlan_tag_present(skb)) {
2341 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2342 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2343 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2345 h.h2->tp_vlan_tci = 0;
2346 h.h2->tp_vlan_tpid = 0;
2348 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2349 hdrlen = sizeof(*h.h2);
2352 /* tp_nxt_offset and vlan are already populated above,
2353 * so DON'T clear those fields here.
2355 h.h3->tp_status |= status;
2356 h.h3->tp_len = skb->len;
2357 h.h3->tp_snaplen = snaplen;
2358 h.h3->tp_mac = macoff;
2359 h.h3->tp_net = netoff;
2360 h.h3->tp_sec = ts.tv_sec;
2361 h.h3->tp_nsec = ts.tv_nsec;
2362 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2363 hdrlen = sizeof(*h.h3);
2369 sll = h.raw + TPACKET_ALIGN(hdrlen);
2370 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2371 sll->sll_family = AF_PACKET;
2372 sll->sll_hatype = dev->type;
2373 sll->sll_protocol = skb->protocol;
2374 sll->sll_pkttype = skb->pkt_type;
2375 if (unlikely(po->origdev))
2376 sll->sll_ifindex = orig_dev->ifindex;
2378 sll->sll_ifindex = dev->ifindex;
2382 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2383 if (po->tp_version <= TPACKET_V2) {
2386 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2389 for (start = h.raw; start < end; start += PAGE_SIZE)
2390 flush_dcache_page(pgv_to_page(start));
2395 if (po->tp_version <= TPACKET_V2) {
2396 spin_lock(&sk->sk_receive_queue.lock);
2397 __packet_set_status(po, h.raw, status);
2398 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
2399 spin_unlock(&sk->sk_receive_queue.lock);
2400 sk->sk_data_ready(sk);
2401 } else if (po->tp_version == TPACKET_V3) {
2402 prb_clear_blk_fill_status(&po->rx_ring);
2406 if (skb_head != skb->data && skb_shared(skb)) {
2407 skb->data = skb_head;
2411 if (!is_drop_n_account)
2418 is_drop_n_account = true;
2419 po->stats.stats1.tp_drops++;
2420 spin_unlock(&sk->sk_receive_queue.lock);
2422 sk->sk_data_ready(sk);
2423 kfree_skb(copy_skb);
2424 goto drop_n_restore;
2427 static void tpacket_destruct_skb(struct sk_buff *skb)
2429 struct packet_sock *po = pkt_sk(skb->sk);
2431 if (likely(po->tx_ring.pg_vec)) {
2435 ph = skb_zcopy_get_nouarg(skb);
2436 packet_dec_pending(&po->tx_ring);
2438 ts = __packet_set_timestamp(po, ph, skb);
2439 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2441 if (!packet_read_pending(&po->tx_ring))
2442 complete(&po->skb_completion);
2448 static void tpacket_set_protocol(const struct net_device *dev,
2449 struct sk_buff *skb)
2451 if (dev->type == ARPHRD_ETHER) {
2452 skb_reset_mac_header(skb);
2453 skb->protocol = eth_hdr(skb)->h_proto;
2457 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2459 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2460 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2461 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2462 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2463 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2464 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2465 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2467 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2473 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2474 struct virtio_net_hdr *vnet_hdr)
2476 if (*len < sizeof(*vnet_hdr))
2478 *len -= sizeof(*vnet_hdr);
2480 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2483 return __packet_snd_vnet_parse(vnet_hdr, *len);
2486 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2487 void *frame, struct net_device *dev, void *data, int tp_len,
2488 __be16 proto, unsigned char *addr, int hlen, int copylen,
2489 const struct sockcm_cookie *sockc)
2491 union tpacket_uhdr ph;
2492 int to_write, offset, len, nr_frags, len_max;
2493 struct socket *sock = po->sk.sk_socket;
2499 skb->protocol = proto;
2501 skb->priority = po->sk.sk_priority;
2502 skb->mark = po->sk.sk_mark;
2503 skb->tstamp = sockc->transmit_time;
2504 skb_setup_tx_timestamp(skb, sockc->tsflags);
2505 skb_zcopy_set_nouarg(skb, ph.raw);
2507 skb_reserve(skb, hlen);
2508 skb_reset_network_header(skb);
2512 if (sock->type == SOCK_DGRAM) {
2513 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2515 if (unlikely(err < 0))
2517 } else if (copylen) {
2518 int hdrlen = min_t(int, copylen, tp_len);
2520 skb_push(skb, dev->hard_header_len);
2521 skb_put(skb, copylen - dev->hard_header_len);
2522 err = skb_store_bits(skb, 0, data, hdrlen);
2525 if (!dev_validate_header(dev, skb->data, hdrlen))
2528 tpacket_set_protocol(dev, skb);
2534 offset = offset_in_page(data);
2535 len_max = PAGE_SIZE - offset;
2536 len = ((to_write > len_max) ? len_max : to_write);
2538 skb->data_len = to_write;
2539 skb->len += to_write;
2540 skb->truesize += to_write;
2541 refcount_add(to_write, &po->sk.sk_wmem_alloc);
2543 while (likely(to_write)) {
2544 nr_frags = skb_shinfo(skb)->nr_frags;
2546 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2547 pr_err("Packet exceed the number of skb frags(%lu)\n",
2552 page = pgv_to_page(data);
2554 flush_dcache_page(page);
2556 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2559 len_max = PAGE_SIZE;
2560 len = ((to_write > len_max) ? len_max : to_write);
2563 skb_probe_transport_header(skb, 0);
2568 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2569 int size_max, void **data)
2571 union tpacket_uhdr ph;
2576 switch (po->tp_version) {
2578 if (ph.h3->tp_next_offset != 0) {
2579 pr_warn_once("variable sized slot not supported");
2582 tp_len = ph.h3->tp_len;
2585 tp_len = ph.h2->tp_len;
2588 tp_len = ph.h1->tp_len;
2591 if (unlikely(tp_len > size_max)) {
2592 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2596 if (unlikely(po->tp_tx_has_off)) {
2597 int off_min, off_max;
2599 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2600 off_max = po->tx_ring.frame_size - tp_len;
2601 if (po->sk.sk_type == SOCK_DGRAM) {
2602 switch (po->tp_version) {
2604 off = ph.h3->tp_net;
2607 off = ph.h2->tp_net;
2610 off = ph.h1->tp_net;
2614 switch (po->tp_version) {
2616 off = ph.h3->tp_mac;
2619 off = ph.h2->tp_mac;
2622 off = ph.h1->tp_mac;
2626 if (unlikely((off < off_min) || (off_max < off)))
2629 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2632 *data = frame + off;
2636 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2638 struct sk_buff *skb = NULL;
2639 struct net_device *dev;
2640 struct virtio_net_hdr *vnet_hdr = NULL;
2641 struct sockcm_cookie sockc;
2643 int err, reserve = 0;
2645 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2646 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2647 unsigned char *addr = NULL;
2648 int tp_len, size_max;
2651 int status = TP_STATUS_AVAILABLE;
2652 int hlen, tlen, copylen = 0;
2655 mutex_lock(&po->pg_vec_lock);
2657 /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2658 * we need to confirm it under protection of pg_vec_lock. */
2660 if (unlikely(!po->tx_ring.pg_vec)) {
2664 if (likely(saddr == NULL)) {
2665 dev = packet_cached_dev_get(po);
2666 proto = READ_ONCE(po->num);
2669 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2671 if (msg->msg_namelen < (saddr->sll_halen
2672 + offsetof(struct sockaddr_ll,
2675 proto = saddr->sll_protocol;
2676 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2677 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2678 if (dev && msg->msg_namelen < dev->addr_len +
2679 offsetof(struct sockaddr_ll, sll_addr))
2681 addr = saddr->sll_addr;
2686 if (unlikely(dev == NULL))
2689 if (unlikely(!(dev->flags & IFF_UP)))
2692 sockcm_init(&sockc, &po->sk);
2693 if (msg->msg_controllen) {
2694 err = sock_cmsg_send(&po->sk, msg, &sockc);
2699 if (po->sk.sk_socket->type == SOCK_RAW)
2700 reserve = dev->hard_header_len;
2701 size_max = po->tx_ring.frame_size
2702 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2704 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2705 size_max = dev->mtu + reserve + VLAN_HLEN;
2707 reinit_completion(&po->skb_completion);
2710 ph = packet_current_frame(po, &po->tx_ring,
2711 TP_STATUS_SEND_REQUEST);
2712 if (unlikely(ph == NULL)) {
2713 if (need_wait && skb) {
2714 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2715 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2717 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2721 /* check for additional frames */
2726 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2730 status = TP_STATUS_SEND_REQUEST;
2731 hlen = LL_RESERVED_SPACE(dev);
2732 tlen = dev->needed_tailroom;
2733 if (po->has_vnet_hdr) {
2735 data += sizeof(*vnet_hdr);
2736 tp_len -= sizeof(*vnet_hdr);
2738 __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2742 copylen = __virtio16_to_cpu(vio_le(),
2745 copylen = max_t(int, copylen, dev->hard_header_len);
2746 skb = sock_alloc_send_skb(&po->sk,
2747 hlen + tlen + sizeof(struct sockaddr_ll) +
2748 (copylen - dev->hard_header_len),
2751 if (unlikely(skb == NULL)) {
2752 /* we assume the socket was initially writeable ... */
2753 if (likely(len_sum > 0))
2757 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2758 addr, hlen, copylen, &sockc);
2759 if (likely(tp_len >= 0) &&
2760 tp_len > dev->mtu + reserve &&
2761 !po->has_vnet_hdr &&
2762 !packet_extra_vlan_len_allowed(dev, skb))
2765 if (unlikely(tp_len < 0)) {
2768 __packet_set_status(po, ph,
2769 TP_STATUS_AVAILABLE);
2770 packet_increment_head(&po->tx_ring);
2774 status = TP_STATUS_WRONG_FORMAT;
2780 if (po->has_vnet_hdr) {
2781 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2785 virtio_net_hdr_set_proto(skb, vnet_hdr);
2788 skb->destructor = tpacket_destruct_skb;
2789 __packet_set_status(po, ph, TP_STATUS_SENDING);
2790 packet_inc_pending(&po->tx_ring);
2792 status = TP_STATUS_SEND_REQUEST;
2793 err = po->xmit(skb);
2794 if (unlikely(err != 0)) {
2796 err = net_xmit_errno(err);
2797 if (err && __packet_get_status(po, ph) ==
2798 TP_STATUS_AVAILABLE) {
2799 /* skb was destructed already */
2804 /* skb was dropped but not destructed yet;
2805 * let's treat it like congestion or err < 0 */
2809 packet_increment_head(&po->tx_ring);
2811 } while (likely((ph != NULL) ||
2812 /* Note: packet_read_pending() might be slow if we have
2813 * to call it, as it's a per-cpu variable, but in the fast path
2814 * we already short-circuit the loop with the first
2815 * condition, and luckily don't have to go that path anyway. */
2818 (need_wait && packet_read_pending(&po->tx_ring))));
2824 __packet_set_status(po, ph, status);
2829 mutex_unlock(&po->pg_vec_lock);
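/*
 * Illustration (not part of the original source): the userspace side of the
 * TX ring that tpacket_snd() drains above.  A minimal TPACKET_V2 sketch for
 * a SOCK_RAW socket; 'fd', 'ring' (char *), 'slot', 'frame_size', 'frame'
 * and 'frame_len' are assumptions of the example.
 *
 *	struct tpacket2_hdr *hdr =
 *		(struct tpacket2_hdr *)(ring + slot * frame_size);
 *
 *	if (hdr->tp_status == TP_STATUS_AVAILABLE) {
 *		memcpy((char *)hdr + TPACKET2_HDRLEN -
 *		       sizeof(struct sockaddr_ll), frame, frame_len);
 *		hdr->tp_len = frame_len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);	// kick the kernel into tpacket_snd()
 *	}
 */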
2833 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2834 size_t reserve, size_t len,
2835 size_t linear, int noblock,
2838 struct sk_buff *skb;
2840 /* Under a page? Don't bother with paged skb. */
2841 if (prepad + len < PAGE_SIZE || !linear)
2844 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2849 skb_reserve(skb, reserve);
2850 skb_put(skb, linear);
2851 skb->data_len = len - linear;
2852 skb->len += len - linear;
2857 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2859 struct sock *sk = sock->sk;
2860 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2861 struct sk_buff *skb;
2862 struct net_device *dev;
2864 unsigned char *addr = NULL;
2865 int err, reserve = 0;
2866 struct sockcm_cookie sockc;
2867 struct virtio_net_hdr vnet_hdr = { 0 };
2869 struct packet_sock *po = pkt_sk(sk);
2870 bool has_vnet_hdr = false;
2871 int hlen, tlen, linear;
2875 * Get and verify the address.
2878 if (likely(saddr == NULL)) {
2879 dev = packet_cached_dev_get(po);
2880 proto = READ_ONCE(po->num);
2883 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2885 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2887 proto = saddr->sll_protocol;
2888 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2889 if (sock->type == SOCK_DGRAM) {
2890 if (dev && msg->msg_namelen < dev->addr_len +
2891 offsetof(struct sockaddr_ll, sll_addr))
2893 addr = saddr->sll_addr;
2898 if (unlikely(dev == NULL))
2901 if (unlikely(!(dev->flags & IFF_UP)))
2904 sockcm_init(&sockc, sk);
2905 sockc.mark = sk->sk_mark;
2906 if (msg->msg_controllen) {
2907 err = sock_cmsg_send(sk, msg, &sockc);
2912 if (sock->type == SOCK_RAW)
2913 reserve = dev->hard_header_len;
2914 if (po->has_vnet_hdr) {
2915 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2918 has_vnet_hdr = true;
2921 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2922 if (!netif_supports_nofcs(dev)) {
2923 err = -EPROTONOSUPPORT;
2926 extra_len = 4; /* We're doing our own CRC */
2930 if (!vnet_hdr.gso_type &&
2931 (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2935 hlen = LL_RESERVED_SPACE(dev);
2936 tlen = dev->needed_tailroom;
2937 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2938 linear = max(linear, min_t(int, len, dev->hard_header_len));
2939 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2940 msg->msg_flags & MSG_DONTWAIT, &err);
2944 skb_reset_network_header(skb);
2947 if (sock->type == SOCK_DGRAM) {
2948 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2949 if (unlikely(offset < 0))
2951 } else if (reserve) {
2952 skb_reserve(skb, -reserve);
2953 if (len < reserve + sizeof(struct ipv6hdr) &&
2954 dev->min_header_len != dev->hard_header_len)
2955 skb_reset_network_header(skb);
2958 /* Returns -EFAULT on error */
2959 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2963 if (sock->type == SOCK_RAW &&
2964 !dev_validate_header(dev, skb->data, len)) {
2969 skb_setup_tx_timestamp(skb, sockc.tsflags);
2971 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2972 !packet_extra_vlan_len_allowed(dev, skb)) {
2977 skb->protocol = proto;
2979 skb->priority = sk->sk_priority;
2980 skb->mark = sockc.mark;
2981 skb->tstamp = sockc.transmit_time;
2984 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2987 len += sizeof(vnet_hdr);
2988 virtio_net_hdr_set_proto(skb, &vnet_hdr);
2991 skb_probe_transport_header(skb, reserve);
2993 if (unlikely(extra_len == 4))
2996 err = po->xmit(skb);
2997 if (unlikely(err != 0)) {
2999 err = net_xmit_errno(err);
3017 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3019 struct sock *sk = sock->sk;
3020 struct packet_sock *po = pkt_sk(sk);
3022 if (po->tx_ring.pg_vec)
3023 return tpacket_snd(po, msg);
3025 return packet_snd(sock, msg, len);
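/*
 * Illustration (not part of the original source): a plain sendto() on a
 * packet socket ends up in packet_snd() above (or in tpacket_snd() once a
 * TX ring is configured).  A minimal SOCK_RAW sketch, assuming 'fd',
 * 'ifindex' and a prebuilt Ethernet frame in 'frame'/'frame_len':
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = ifindex,
 *	};
 *
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */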
3029 * Close a PACKET socket. This is fairly simple. We immediately go
3030 * to 'closed' state and remove our protocol entry in the device list.
3033 static int packet_release(struct socket *sock)
3035 struct sock *sk = sock->sk;
3036 struct packet_sock *po;
3037 struct packet_fanout *f;
3039 union tpacket_req_u req_u;
3047 mutex_lock(&net->packet.sklist_lock);
3048 sk_del_node_init_rcu(sk);
3049 mutex_unlock(&net->packet.sklist_lock);
3052 sock_prot_inuse_add(net, sk->sk_prot, -1);
3055 spin_lock(&po->bind_lock);
3056 unregister_prot_hook(sk, false);
3057 packet_cached_dev_reset(po);
3059 if (po->prot_hook.dev) {
3060 dev_put(po->prot_hook.dev);
3061 po->prot_hook.dev = NULL;
3063 spin_unlock(&po->bind_lock);
3065 packet_flush_mclist(sk);
3068 if (po->rx_ring.pg_vec) {
3069 memset(&req_u, 0, sizeof(req_u));
3070 packet_set_ring(sk, &req_u, 1, 0);
3073 if (po->tx_ring.pg_vec) {
3074 memset(&req_u, 0, sizeof(req_u));
3075 packet_set_ring(sk, &req_u, 1, 1);
3079 f = fanout_release(sk);
3083 kfree(po->rollover);
3085 fanout_release_data(f);
3089 * Now the socket is dead. No more input will appear.
3096 skb_queue_purge(&sk->sk_receive_queue);
3097 packet_free_pending(po);
3098 sk_refcnt_debug_release(sk);
3105 * Attach a packet hook.
3108 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3111 struct packet_sock *po = pkt_sk(sk);
3112 struct net_device *dev_curr;
3115 struct net_device *dev = NULL;
3117 bool unlisted = false;
3120 spin_lock(&po->bind_lock);
3129 dev = dev_get_by_name_rcu(sock_net(sk), name);
3134 } else if (ifindex) {
3135 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3145 proto_curr = po->prot_hook.type;
3146 dev_curr = po->prot_hook.dev;
3148 need_rehook = proto_curr != proto || dev_curr != dev;
3153 /* prevents packet_notifier() from calling
3154 * register_prot_hook() */
3156 WRITE_ONCE(po->num, 0);
3157 __unregister_prot_hook(sk, true);
3159 dev_curr = po->prot_hook.dev;
3161 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3165 BUG_ON(po->running);
3166 WRITE_ONCE(po->num, proto);
3167 po->prot_hook.type = proto;
3169 if (unlikely(unlisted)) {
3171 po->prot_hook.dev = NULL;
3172 WRITE_ONCE(po->ifindex, -1);
3173 packet_cached_dev_reset(po);
3175 po->prot_hook.dev = dev;
3176 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3177 packet_cached_dev_assign(po, dev);
3183 if (proto == 0 || !need_rehook)
3186 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3187 register_prot_hook(sk);
3189 sk->sk_err = ENETDOWN;
3190 if (!sock_flag(sk, SOCK_DEAD))
3191 sk->sk_error_report(sk);
3196 spin_unlock(&po->bind_lock);
3202 * Bind a packet socket to a device
3205 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3208 struct sock *sk = sock->sk;
3209 char name[sizeof(uaddr->sa_data) + 1];
3215 if (addr_len != sizeof(struct sockaddr))
3217 /* uaddr->sa_data comes from userspace; it's not guaranteed to be null-terminated. */
3220 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3221 name[sizeof(uaddr->sa_data)] = 0;
3223 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3226 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3228 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3229 struct sock *sk = sock->sk;
3235 if (addr_len < sizeof(struct sockaddr_ll))
3237 if (sll->sll_family != AF_PACKET)
3240 return packet_do_bind(sk, NULL, sll->sll_ifindex,
3241 sll->sll_protocol ? : pkt_sk(sk)->num);
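/*
 * Illustration (not part of the original source): the bind() call that lands
 * in packet_do_bind() above.  A minimal sketch, assuming 'fd' and 'ifindex'
 * already exist; ETH_P_ALL makes the socket see every protocol.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = ifindex,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */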
3244 static struct proto packet_proto = {
3246 .owner = THIS_MODULE,
3247 .obj_size = sizeof(struct packet_sock),
3251 * Create a packet socket (SOCK_DGRAM, SOCK_RAW or SOCK_PACKET).
3254 static int packet_create(struct net *net, struct socket *sock, int protocol,
3258 struct packet_sock *po;
3259 __be16 proto = (__force __be16)protocol; /* weird, but documented */
3262 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3264 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3265 sock->type != SOCK_PACKET)
3266 return -ESOCKTNOSUPPORT;
3268 sock->state = SS_UNCONNECTED;
3271 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3275 sock->ops = &packet_ops;
3276 if (sock->type == SOCK_PACKET)
3277 sock->ops = &packet_ops_spkt;
3279 sock_init_data(sock, sk);
3282 init_completion(&po->skb_completion);
3283 sk->sk_family = PF_PACKET;
3285 po->xmit = dev_queue_xmit;
3287 err = packet_alloc_pending(po);
3291 packet_cached_dev_reset(po);
3293 sk->sk_destruct = packet_sock_destruct;
3294 sk_refcnt_debug_inc(sk);
3297 * Attach a protocol block
3300 spin_lock_init(&po->bind_lock);
3301 mutex_init(&po->pg_vec_lock);
3302 po->rollover = NULL;
3303 po->prot_hook.func = packet_rcv;
3305 if (sock->type == SOCK_PACKET)
3306 po->prot_hook.func = packet_rcv_spkt;
3308 po->prot_hook.af_packet_priv = sk;
3309 po->prot_hook.af_packet_net = sock_net(sk);
3312 po->prot_hook.type = proto;
3313 __register_prot_hook(sk);
3316 mutex_lock(&net->packet.sklist_lock);
3317 sk_add_node_tail_rcu(sk, &net->packet.sklist);
3318 mutex_unlock(&net->packet.sklist_lock);
3321 sock_prot_inuse_add(net, &packet_proto, 1);
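/*
 * Illustration (not part of the original source): the userspace calls that
 * reach packet_create().  CAP_NET_RAW is required; the protocol arguments
 * are just examples.
 *
 *	int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 */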
3332 * Pull a packet from our receive queue and hand it to the user.
3333 * If necessary we block.
3336 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3339 struct sock *sk = sock->sk;
3340 struct sk_buff *skb;
3342 int vnet_hdr_len = 0;
3343 unsigned int origlen = 0;
3346 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3350 /* What error should we return now? EUNATTACH? */
3351 if (pkt_sk(sk)->ifindex < 0)
3355 if (flags & MSG_ERRQUEUE) {
3356 err = sock_recv_errqueue(sk, msg, len,
3357 SOL_PACKET, PACKET_TX_TIMESTAMP);
3362 * Call the generic datagram receiver. This handles all sorts
3363 * of horrible races and re-entrancy so we can forget about it
3364 * in the protocol layers.
3366 * Now it will return ENETDOWN if the device has just gone down,
3367 * but then it will block.
3370 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3373 * An error occurred, so return it. Because skb_recv_datagram()
3374 * handles the blocking, we don't have to worry about blocking retries.
3381 if (pkt_sk(sk)->pressure)
3382 packet_rcv_has_room(pkt_sk(sk), NULL);
3384 if (pkt_sk(sk)->has_vnet_hdr) {
3385 err = packet_rcv_vnet(msg, skb, &len);
3388 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3391 /* You lose any data beyond the buffer you gave. If it worries
3392 * a user program, it can ask the device for its MTU anyway. */
3398 msg->msg_flags |= MSG_TRUNC;
3401 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3405 if (sock->type != SOCK_PACKET) {
3406 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3408 /* Original length was stored in sockaddr_ll fields */
3409 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3410 sll->sll_family = AF_PACKET;
3411 sll->sll_protocol = skb->protocol;
3414 sock_recv_ts_and_drops(msg, sk, skb);
3416 if (msg->msg_name) {
3417 const size_t max_len = min(sizeof(skb->cb),
3418 sizeof(struct sockaddr_storage));
3421 /* If the address length field is there to be filled
3422 * in, we fill it in now. */
3424 if (sock->type == SOCK_PACKET) {
3425 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3426 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3427 copy_len = msg->msg_namelen;
3429 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3431 msg->msg_namelen = sll->sll_halen +
3432 offsetof(struct sockaddr_ll, sll_addr);
3433 copy_len = msg->msg_namelen;
3434 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3435 memset(msg->msg_name +
3436 offsetof(struct sockaddr_ll, sll_addr),
3437 0, sizeof(sll->sll_addr));
3438 msg->msg_namelen = sizeof(struct sockaddr_ll);
3441 if (WARN_ON_ONCE(copy_len > max_len)) {
3443 msg->msg_namelen = copy_len;
3445 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3448 if (pkt_sk(sk)->auxdata) {
3449 struct tpacket_auxdata aux;
3451 aux.tp_status = TP_STATUS_USER;
3452 if (skb->ip_summed == CHECKSUM_PARTIAL)
3453 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3454 else if (skb->pkt_type != PACKET_OUTGOING &&
3455 (skb->ip_summed == CHECKSUM_COMPLETE ||
3456 skb_csum_unnecessary(skb)))
3457 aux.tp_status |= TP_STATUS_CSUM_VALID;
3459 aux.tp_len = origlen;
3460 aux.tp_snaplen = skb->len;
3462 aux.tp_net = skb_network_offset(skb);
3463 if (skb_vlan_tag_present(skb)) {
3464 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3465 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3466 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3468 aux.tp_vlan_tci = 0;
3469 aux.tp_vlan_tpid = 0;
3471 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3475 * Free or return the buffer as appropriate. Again this
3476 * hides all the races and re-entrancy issues from us.
3478 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3481 skb_free_datagram(sk, skb);
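/*
 * Illustration (not part of the original source): reading the PACKET_AUXDATA
 * control message that packet_recvmsg() emits above.  A minimal sketch,
 * assuming the option was enabled beforehand and that 'fd', 'iov' and
 * use_aux() are provided by the caller.
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct msghdr m = {
 *		.msg_iov        = &iov,
 *		.msg_iovlen     = 1,
 *		.msg_control    = cbuf,
 *		.msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	recvmsg(fd, &m, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&m); cmsg; cmsg = CMSG_NXTHDR(&m, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA)
 *			use_aux((struct tpacket_auxdata *)CMSG_DATA(cmsg));
 */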
3486 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3489 struct net_device *dev;
3490 struct sock *sk = sock->sk;
3495 uaddr->sa_family = AF_PACKET;
3496 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3498 dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3500 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3503 return sizeof(*uaddr);
3506 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3509 struct net_device *dev;
3510 struct sock *sk = sock->sk;
3511 struct packet_sock *po = pkt_sk(sk);
3512 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3518 ifindex = READ_ONCE(po->ifindex);
3519 sll->sll_family = AF_PACKET;
3520 sll->sll_ifindex = ifindex;
3521 sll->sll_protocol = READ_ONCE(po->num);
3522 sll->sll_pkttype = 0;
3524 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3526 sll->sll_hatype = dev->type;
3527 sll->sll_halen = dev->addr_len;
3528 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3530 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3535 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3538 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3542 case PACKET_MR_MULTICAST:
3543 if (i->alen != dev->addr_len)
3546 return dev_mc_add(dev, i->addr);
3548 return dev_mc_del(dev, i->addr);
3550 case PACKET_MR_PROMISC:
3551 return dev_set_promiscuity(dev, what);
3552 case PACKET_MR_ALLMULTI:
3553 return dev_set_allmulti(dev, what);
3554 case PACKET_MR_UNICAST:
3555 if (i->alen != dev->addr_len)
3558 return dev_uc_add(dev, i->addr);
3560 return dev_uc_del(dev, i->addr);
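/*
 * Illustration (not part of the original source): the PACKET_ADD_MEMBERSHIP
 * request that ends up in packet_dev_mc() above.  A minimal sketch enabling
 * promiscuous mode on one interface; 'fd' and 'ifindex' are assumptions.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */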
3568 static void packet_dev_mclist_delete(struct net_device *dev,
3569 struct packet_mclist **mlp)
3571 struct packet_mclist *ml;
3573 while ((ml = *mlp) != NULL) {
3574 if (ml->ifindex == dev->ifindex) {
3575 packet_dev_mc(dev, ml, -1);
3583 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3585 struct packet_sock *po = pkt_sk(sk);
3586 struct packet_mclist *ml, *i;
3587 struct net_device *dev;
3593 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3598 if (mreq->mr_alen > dev->addr_len)
3602 i = kmalloc(sizeof(*i), GFP_KERNEL);
3607 for (ml = po->mclist; ml; ml = ml->next) {
3608 if (ml->ifindex == mreq->mr_ifindex &&
3609 ml->type == mreq->mr_type &&
3610 ml->alen == mreq->mr_alen &&
3611 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3613 /* Free the new element ... */
3619 i->type = mreq->mr_type;
3620 i->ifindex = mreq->mr_ifindex;
3621 i->alen = mreq->mr_alen;
3622 memcpy(i->addr, mreq->mr_address, i->alen);
3623 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3625 i->next = po->mclist;
3627 err = packet_dev_mc(dev, i, 1);
3629 po->mclist = i->next;
3638 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3640 struct packet_mclist *ml, **mlp;
3644 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3645 if (ml->ifindex == mreq->mr_ifindex &&
3646 ml->type == mreq->mr_type &&
3647 ml->alen == mreq->mr_alen &&
3648 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3649 if (--ml->count == 0) {
3650 struct net_device *dev;
3652 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3654 packet_dev_mc(dev, ml, -1);
3664 static void packet_flush_mclist(struct sock *sk)
3666 struct packet_sock *po = pkt_sk(sk);
3667 struct packet_mclist *ml;
3673 while ((ml = po->mclist) != NULL) {
3674 struct net_device *dev;
3676 po->mclist = ml->next;
3677 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3679 packet_dev_mc(dev, ml, -1);
3686 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3688 struct sock *sk = sock->sk;
3689 struct packet_sock *po = pkt_sk(sk);
3692 if (level != SOL_PACKET)
3693 return -ENOPROTOOPT;
3696 case PACKET_ADD_MEMBERSHIP:
3697 case PACKET_DROP_MEMBERSHIP:
3699 struct packet_mreq_max mreq;
3701 memset(&mreq, 0, sizeof(mreq));
3702 if (len < sizeof(struct packet_mreq))
3704 if (len > sizeof(mreq))
3706 if (copy_from_user(&mreq, optval, len))
3708 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3710 if (optname == PACKET_ADD_MEMBERSHIP)
3711 ret = packet_mc_add(sk, &mreq);
3713 ret = packet_mc_drop(sk, &mreq);
3717 case PACKET_RX_RING:
3718 case PACKET_TX_RING:
3720 union tpacket_req_u req_u;
3724 switch (po->tp_version) {
3727 len = sizeof(req_u.req);
3731 len = sizeof(req_u.req3);
3737 if (copy_from_user(&req_u.req, optval, len))
3740 ret = packet_set_ring(sk, &req_u, 0,
3741 optname == PACKET_TX_RING);
3746 case PACKET_COPY_THRESH:
3750 if (optlen != sizeof(val))
3752 if (copy_from_user(&val, optval, sizeof(val)))
3755 pkt_sk(sk)->copy_thresh = val;
3758 case PACKET_VERSION:
3762 if (optlen != sizeof(val))
3764 if (copy_from_user(&val, optval, sizeof(val)))
3775 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3778 po->tp_version = val;
3784 case PACKET_RESERVE:
3788 if (optlen != sizeof(val))
3790 if (copy_from_user(&val, optval, sizeof(val)))
3795 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3798 po->tp_reserve = val;
3808 if (optlen != sizeof(val))
3810 if (copy_from_user(&val, optval, sizeof(val)))
3814 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3817 po->tp_loss = !!val;
3823 case PACKET_AUXDATA:
3827 if (optlen < sizeof(val))
3829 if (copy_from_user(&val, optval, sizeof(val)))
3833 po->auxdata = !!val;
3837 case PACKET_ORIGDEV:
3841 if (optlen < sizeof(val))
3843 if (copy_from_user(&val, optval, sizeof(val)))
3847 po->origdev = !!val;
3851 case PACKET_VNET_HDR:
3855 if (sock->type != SOCK_RAW)
3857 if (optlen < sizeof(val))
3859 if (copy_from_user(&val, optval, sizeof(val)))
3863 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3866 po->has_vnet_hdr = !!val;
3872 case PACKET_TIMESTAMP:
3876 if (optlen != sizeof(val))
3878 if (copy_from_user(&val, optval, sizeof(val)))
3881 po->tp_tstamp = val;
3888 if (optlen != sizeof(val))
3890 if (copy_from_user(&val, optval, sizeof(val)))
3893 return fanout_add(sk, val & 0xffff, val >> 16);
3895 case PACKET_FANOUT_DATA:
3897 /* Paired with the WRITE_ONCE() in fanout_add() */
3898 if (!READ_ONCE(po->fanout))
3901 return fanout_set_data(po, optval, optlen);
3903 case PACKET_TX_HAS_OFF:
3907 if (optlen != sizeof(val))
3909 if (copy_from_user(&val, optval, sizeof(val)))
3913 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3916 po->tp_tx_has_off = !!val;
3922 case PACKET_QDISC_BYPASS:
3926 if (optlen != sizeof(val))
3928 if (copy_from_user(&val, optval, sizeof(val)))
3931 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3935 return -ENOPROTOOPT;
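/*
 * Illustration (not part of the original source): the setsockopt() sequence
 * handled above when userspace configures an RX ring.  A minimal TPACKET_V2
 * sketch; the geometry values are examples only (the block size must be
 * page-aligned and tp_frame_nr must cover all blocks exactly).
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 1 << 22,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 1 << 11,
 *		.tp_frame_nr   = ((1 << 22) / (1 << 11)) * 64,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */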
3939 static int packet_getsockopt(struct socket *sock, int level, int optname,
3940 char __user *optval, int __user *optlen)
3943 int val, lv = sizeof(val);
3944 struct sock *sk = sock->sk;
3945 struct packet_sock *po = pkt_sk(sk);
3947 union tpacket_stats_u st;
3948 struct tpacket_rollover_stats rstats;
3950 if (level != SOL_PACKET)
3951 return -ENOPROTOOPT;
3953 if (get_user(len, optlen))
3960 case PACKET_STATISTICS:
3961 spin_lock_bh(&sk->sk_receive_queue.lock);
3962 memcpy(&st, &po->stats, sizeof(st));
3963 memset(&po->stats, 0, sizeof(po->stats));
3964 spin_unlock_bh(&sk->sk_receive_queue.lock);
3966 if (po->tp_version == TPACKET_V3) {
3967 lv = sizeof(struct tpacket_stats_v3);
3968 st.stats3.tp_packets += st.stats3.tp_drops;
3971 lv = sizeof(struct tpacket_stats);
3972 st.stats1.tp_packets += st.stats1.tp_drops;
3977 case PACKET_AUXDATA:
3980 case PACKET_ORIGDEV:
3983 case PACKET_VNET_HDR:
3984 val = po->has_vnet_hdr;
3986 case PACKET_VERSION:
3987 val = po->tp_version;
3990 if (len > sizeof(int))
3992 if (len < sizeof(int))
3994 if (copy_from_user(&val, optval, len))
3998 val = sizeof(struct tpacket_hdr);
4001 val = sizeof(struct tpacket2_hdr);
4004 val = sizeof(struct tpacket3_hdr);
4010 case PACKET_RESERVE:
4011 val = po->tp_reserve;
4016 case PACKET_TIMESTAMP:
4017 val = po->tp_tstamp;
4021 ((u32)po->fanout->id |
4022 ((u32)po->fanout->type << 16) |
4023 ((u32)po->fanout->flags << 24)) :
4026 case PACKET_ROLLOVER_STATS:
4029 rstats.tp_all = atomic_long_read(&po->rollover->num);
4030 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4031 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4033 lv = sizeof(rstats);
4035 case PACKET_TX_HAS_OFF:
4036 val = po->tp_tx_has_off;
4038 case PACKET_QDISC_BYPASS:
4039 val = packet_use_direct_xmit(po);
4042 return -ENOPROTOOPT;
4047 if (put_user(len, optlen))
4049 if (copy_to_user(optval, data, len))
4055 #ifdef CONFIG_COMPAT
4056 static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
4057 char __user *optval, unsigned int optlen)
4059 struct packet_sock *po = pkt_sk(sock->sk);
4061 if (level != SOL_PACKET)
4062 return -ENOPROTOOPT;
4064 if (optname == PACKET_FANOUT_DATA &&
4065 po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
4066 optval = (char __user *)get_compat_bpf_fprog(optval);
4069 optlen = sizeof(struct sock_fprog);
4072 return packet_setsockopt(sock, level, optname, optval, optlen);
4076 static int packet_notifier(struct notifier_block *this,
4077 unsigned long msg, void *ptr)
4080 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4081 struct net *net = dev_net(dev);
4084 sk_for_each_rcu(sk, &net->packet.sklist) {
4085 struct packet_sock *po = pkt_sk(sk);
4088 case NETDEV_UNREGISTER:
4090 packet_dev_mclist_delete(dev, &po->mclist);
4094 if (dev->ifindex == po->ifindex) {
4095 spin_lock(&po->bind_lock);
4097 __unregister_prot_hook(sk, false);
4098 sk->sk_err = ENETDOWN;
4099 if (!sock_flag(sk, SOCK_DEAD))
4100 sk->sk_error_report(sk);
4102 if (msg == NETDEV_UNREGISTER) {
4103 packet_cached_dev_reset(po);
4104 WRITE_ONCE(po->ifindex, -1);
4105 if (po->prot_hook.dev)
4106 dev_put(po->prot_hook.dev);
4107 po->prot_hook.dev = NULL;
4109 spin_unlock(&po->bind_lock);
4113 if (dev->ifindex == po->ifindex) {
4114 spin_lock(&po->bind_lock);
4116 register_prot_hook(sk);
4117 spin_unlock(&po->bind_lock);
4127 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4130 struct sock *sk = sock->sk;
4135 int amount = sk_wmem_alloc_get(sk);
4137 return put_user(amount, (int __user *)arg);
4141 struct sk_buff *skb;
4144 spin_lock_bh(&sk->sk_receive_queue.lock);
4145 skb = skb_peek(&sk->sk_receive_queue);
4148 spin_unlock_bh(&sk->sk_receive_queue.lock);
4149 return put_user(amount, (int __user *)arg);
4152 return sock_get_timestamp(sk, (struct timeval __user *)arg);
4154 return sock_get_timestampns(sk, (struct timespec __user *)arg);
4164 case SIOCGIFBRDADDR:
4165 case SIOCSIFBRDADDR:
4166 case SIOCGIFNETMASK:
4167 case SIOCSIFNETMASK:
4168 case SIOCGIFDSTADDR:
4169 case SIOCSIFDSTADDR:
4171 return inet_dgram_ops.ioctl(sock, cmd, arg);
4175 return -ENOIOCTLCMD;
4180 static __poll_t packet_poll(struct file *file, struct socket *sock,
4183 struct sock *sk = sock->sk;
4184 struct packet_sock *po = pkt_sk(sk);
4185 __poll_t mask = datagram_poll(file, sock, wait);
4187 spin_lock_bh(&sk->sk_receive_queue.lock);
4188 if (po->rx_ring.pg_vec) {
4189 if (!packet_previous_rx_frame(po, &po->rx_ring,
4191 mask |= EPOLLIN | EPOLLRDNORM;
4193 if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
4195 spin_unlock_bh(&sk->sk_receive_queue.lock);
4196 spin_lock_bh(&sk->sk_write_queue.lock);
4197 if (po->tx_ring.pg_vec) {
4198 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4199 mask |= EPOLLOUT | EPOLLWRNORM;
4201 spin_unlock_bh(&sk->sk_write_queue.lock);
4206 /* Dirty? Well, I still have not learned a better way to account for user mmaps. */
4210 static void packet_mm_open(struct vm_area_struct *vma)
4212 struct file *file = vma->vm_file;
4213 struct socket *sock = file->private_data;
4214 struct sock *sk = sock->sk;
4217 atomic_inc(&pkt_sk(sk)->mapped);
4220 static void packet_mm_close(struct vm_area_struct *vma)
4222 struct file *file = vma->vm_file;
4223 struct socket *sock = file->private_data;
4224 struct sock *sk = sock->sk;
4227 atomic_dec(&pkt_sk(sk)->mapped);
4230 static const struct vm_operations_struct packet_mmap_ops = {
4231 .open = packet_mm_open,
4232 .close = packet_mm_close,
4235 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4240 for (i = 0; i < len; i++) {
4241 if (likely(pg_vec[i].buffer)) {
4242 if (is_vmalloc_addr(pg_vec[i].buffer))
4243 vfree(pg_vec[i].buffer);
4245 free_pages((unsigned long)pg_vec[i].buffer,
4247 pg_vec[i].buffer = NULL;
4253 static char *alloc_one_pg_vec_page(unsigned long order)
4256 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4257 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4259 buffer = (char *) __get_free_pages(gfp_flags, order);
4263 /* __get_free_pages failed, fall back to vmalloc */
4264 buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4268 /* vmalloc failed, let's dig into swap here */
4269 gfp_flags &= ~__GFP_NORETRY;
4270 buffer = (char *) __get_free_pages(gfp_flags, order);
4274 /* complete and utter failure */
4278 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4280 unsigned int block_nr = req->tp_block_nr;
4284 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4285 if (unlikely(!pg_vec))
4288 for (i = 0; i < block_nr; i++) {
4289 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4290 if (unlikely(!pg_vec[i].buffer))
4291 goto out_free_pgvec;
4298 free_pg_vec(pg_vec, order, block_nr);
4303 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4304 int closing, int tx_ring)
4306 struct pgv *pg_vec = NULL;
4307 struct packet_sock *po = pkt_sk(sk);
4308 unsigned long *rx_owner_map = NULL;
4309 int was_running, order = 0;
4310 struct packet_ring_buffer *rb;
4311 struct sk_buff_head *rb_queue;
4314 /* Added to avoid minimal code churn */
4315 struct tpacket_req *req = &req_u->req;
4317 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4318 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4322 if (atomic_read(&po->mapped))
4324 if (packet_read_pending(rb))
4328 if (req->tp_block_nr) {
4329 unsigned int min_frame_size;
4331 /* Sanity tests and some calculations */
4333 if (unlikely(rb->pg_vec))
4336 switch (po->tp_version) {
4338 po->tp_hdrlen = TPACKET_HDRLEN;
4341 po->tp_hdrlen = TPACKET2_HDRLEN;
4344 po->tp_hdrlen = TPACKET3_HDRLEN;
4349 if (unlikely((int)req->tp_block_size <= 0))
4351 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4353 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4354 if (po->tp_version >= TPACKET_V3 &&
4355 req->tp_block_size <
4356 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4358 if (unlikely(req->tp_frame_size < min_frame_size))
4360 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4363 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4364 if (unlikely(rb->frames_per_block == 0))
4366 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4368 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4373 order = get_order(req->tp_block_size);
4374 pg_vec = alloc_pg_vec(req, order);
4375 if (unlikely(!pg_vec))
4377 switch (po->tp_version) {
4379 /* Block transmit is not supported yet */
4381 init_prb_bdqc(po, rb, pg_vec, req_u);
4383 struct tpacket_req3 *req3 = &req_u->req3;
4385 if (req3->tp_retire_blk_tov ||
4386 req3->tp_sizeof_priv ||
4387 req3->tp_feature_req_word) {
4389 goto out_free_pg_vec;
4395 rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4396 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4398 goto out_free_pg_vec;
4406 if (unlikely(req->tp_frame_nr))
4411 /* Detach socket from network */
4412 spin_lock(&po->bind_lock);
4413 was_running = po->running;
4416 WRITE_ONCE(po->num, 0);
4417 __unregister_prot_hook(sk, false);
4419 spin_unlock(&po->bind_lock);
4424 mutex_lock(&po->pg_vec_lock);
4425 if (closing || atomic_read(&po->mapped) == 0) {
4427 spin_lock_bh(&rb_queue->lock);
4428 swap(rb->pg_vec, pg_vec);
4429 if (po->tp_version <= TPACKET_V2)
4430 swap(rb->rx_owner_map, rx_owner_map);
4431 rb->frame_max = (req->tp_frame_nr - 1);
4433 rb->frame_size = req->tp_frame_size;
4434 spin_unlock_bh(&rb_queue->lock);
4436 swap(rb->pg_vec_order, order);
4437 swap(rb->pg_vec_len, req->tp_block_nr);
4439 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4440 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4441 tpacket_rcv : packet_rcv;
4442 skb_queue_purge(rb_queue);
4443 if (atomic_read(&po->mapped))
4444 pr_err("packet_mmap: vma is busy: %d\n",
4445 atomic_read(&po->mapped));
4447 mutex_unlock(&po->pg_vec_lock);
4449 spin_lock(&po->bind_lock);
4451 WRITE_ONCE(po->num, num);
4452 register_prot_hook(sk);
4454 spin_unlock(&po->bind_lock);
4455 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4456 /* Because we don't support block-based V3 on tx-ring */
4458 prb_shutdown_retire_blk_timer(po, rb_queue);
4463 bitmap_free(rx_owner_map);
4464 free_pg_vec(pg_vec, order, req->tp_block_nr);
4470 static int packet_mmap(struct file *file, struct socket *sock,
4471 struct vm_area_struct *vma)
4473 struct sock *sk = sock->sk;
4474 struct packet_sock *po = pkt_sk(sk);
4475 unsigned long size, expected_size;
4476 struct packet_ring_buffer *rb;
4477 unsigned long start;
4484 mutex_lock(&po->pg_vec_lock);
4487 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4489 expected_size += rb->pg_vec_len
4495 if (expected_size == 0)
4498 size = vma->vm_end - vma->vm_start;
4499 if (size != expected_size)
4502 start = vma->vm_start;
4503 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4504 if (rb->pg_vec == NULL)
4507 for (i = 0; i < rb->pg_vec_len; i++) {
4509 void *kaddr = rb->pg_vec[i].buffer;
4512 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4513 page = pgv_to_page(kaddr);
4514 err = vm_insert_page(vma, start, page);
4523 atomic_inc(&po->mapped);
4524 vma->vm_ops = &packet_mmap_ops;
4528 mutex_unlock(&po->pg_vec_lock);
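/*
 * Illustration (not part of the original source): mapping the ring that
 * packet_mmap() exposes above.  A minimal sketch; 'req' is the tpacket_req
 * passed to PACKET_RX_RING, and the single mapping must span all blocks.
 *
 *	size_t len = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 *	if (ring == MAP_FAILED)
 *		perror("mmap");
 */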
4532 static const struct proto_ops packet_ops_spkt = {
4533 .family = PF_PACKET,
4534 .owner = THIS_MODULE,
4535 .release = packet_release,
4536 .bind = packet_bind_spkt,
4537 .connect = sock_no_connect,
4538 .socketpair = sock_no_socketpair,
4539 .accept = sock_no_accept,
4540 .getname = packet_getname_spkt,
4541 .poll = datagram_poll,
4542 .ioctl = packet_ioctl,
4543 .listen = sock_no_listen,
4544 .shutdown = sock_no_shutdown,
4545 .setsockopt = sock_no_setsockopt,
4546 .getsockopt = sock_no_getsockopt,
4547 .sendmsg = packet_sendmsg_spkt,
4548 .recvmsg = packet_recvmsg,
4549 .mmap = sock_no_mmap,
4550 .sendpage = sock_no_sendpage,
4553 static const struct proto_ops packet_ops = {
4554 .family = PF_PACKET,
4555 .owner = THIS_MODULE,
4556 .release = packet_release,
4557 .bind = packet_bind,
4558 .connect = sock_no_connect,
4559 .socketpair = sock_no_socketpair,
4560 .accept = sock_no_accept,
4561 .getname = packet_getname,
4562 .poll = packet_poll,
4563 .ioctl = packet_ioctl,
4564 .listen = sock_no_listen,
4565 .shutdown = sock_no_shutdown,
4566 .setsockopt = packet_setsockopt,
4567 .getsockopt = packet_getsockopt,
4568 #ifdef CONFIG_COMPAT
4569 .compat_setsockopt = compat_packet_setsockopt,
4571 .sendmsg = packet_sendmsg,
4572 .recvmsg = packet_recvmsg,
4573 .mmap = packet_mmap,
4574 .sendpage = sock_no_sendpage,
4577 static const struct net_proto_family packet_family_ops = {
4578 .family = PF_PACKET,
4579 .create = packet_create,
4580 .owner = THIS_MODULE,
4583 static struct notifier_block packet_netdev_notifier = {
4584 .notifier_call = packet_notifier,
4587 #ifdef CONFIG_PROC_FS
4589 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4592 struct net *net = seq_file_net(seq);
4595 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4598 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4600 struct net *net = seq_file_net(seq);
4601 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4604 static void packet_seq_stop(struct seq_file *seq, void *v)
4610 static int packet_seq_show(struct seq_file *seq, void *v)
4612 if (v == SEQ_START_TOKEN)
4613 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
4615 struct sock *s = sk_entry(v);
4616 const struct packet_sock *po = pkt_sk(s);
4619 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4621 refcount_read(&s->sk_refcnt),
4623 ntohs(READ_ONCE(po->num)),
4624 READ_ONCE(po->ifindex),
4626 atomic_read(&s->sk_rmem_alloc),
4627 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4634 static const struct seq_operations packet_seq_ops = {
4635 .start = packet_seq_start,
4636 .next = packet_seq_next,
4637 .stop = packet_seq_stop,
4638 .show = packet_seq_show,
4642 static int __net_init packet_net_init(struct net *net)
4644 mutex_init(&net->packet.sklist_lock);
4645 INIT_HLIST_HEAD(&net->packet.sklist);
4647 if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4648 sizeof(struct seq_net_private)))
4654 static void __net_exit packet_net_exit(struct net *net)
4656 remove_proc_entry("packet", net->proc_net);
4657 WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4660 static struct pernet_operations packet_net_ops = {
4661 .init = packet_net_init,
4662 .exit = packet_net_exit,
4666 static void __exit packet_exit(void)
4668 unregister_netdevice_notifier(&packet_netdev_notifier);
4669 unregister_pernet_subsys(&packet_net_ops);
4670 sock_unregister(PF_PACKET);
4671 proto_unregister(&packet_proto);
4674 static int __init packet_init(void)
4678 rc = proto_register(&packet_proto, 0);
4681 rc = sock_register(&packet_family_ops);
4684 rc = register_pernet_subsys(&packet_net_ops);
4687 rc = register_netdevice_notifier(&packet_netdev_notifier);
4694 unregister_pernet_subsys(&packet_net_ops);
4696 sock_unregister(PF_PACKET);
4698 proto_unregister(&packet_proto);
4703 module_init(packet_init);
4704 module_exit(packet_exit);
4705 MODULE_LICENSE("GPL");
4706 MODULE_ALIAS_NETPROTO(PF_PACKET);