#include <linux/module.h>
#include <linux/sock_diag.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/packet_diag.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>
#include <net/netlink.h>

#include "internal.h"
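/* Fill a PACKET_DIAG_INFO attribute describing the socket's basic state:
 * bound device, ring protocol version, reserved headroom, copy threshold,
 * timestamping flags and the PDI_* status bits.
 */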
static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct packet_diag_info pinfo;

	pinfo.pdi_index = po->ifindex;
	pinfo.pdi_version = po->tp_version;
	pinfo.pdi_reserve = po->tp_reserve;
	pinfo.pdi_copy_thresh = po->copy_thresh;
	pinfo.pdi_tstamp = po->tp_tstamp;

	pinfo.pdi_flags = 0;
	if (po->running)
		pinfo.pdi_flags |= PDI_RUNNING;
	if (po->auxdata)
		pinfo.pdi_flags |= PDI_AUXDATA;
	if (po->origdev)
		pinfo.pdi_flags |= PDI_ORIGDEV;
	if (po->has_vnet_hdr)
		pinfo.pdi_flags |= PDI_VNETHDR;
	if (po->tp_loss)
		pinfo.pdi_flags |= PDI_LOSS;

	return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
}
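/* Dump the socket's multicast memberships as a PACKET_DIAG_MCLIST nested
 * attribute, one struct packet_diag_mclist per entry, walking po->mclist
 * under the RTNL lock.
 */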
static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct nlattr *mca;
	struct packet_mclist *ml;

	mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
	if (!mca)
		return -EMSGSIZE;

	rtnl_lock();
	for (ml = po->mclist; ml; ml = ml->next) {
		struct packet_diag_mclist *dml;

		dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
		if (!dml) {
			rtnl_unlock();
			nla_nest_cancel(nlskb, mca);
			return -EMSGSIZE;
		}

		dml->pdmc_index = ml->ifindex;
		dml->pdmc_type = ml->type;
		dml->pdmc_alen = ml->alen;
		dml->pdmc_count = ml->count;
		BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
		memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
	}
	rtnl_unlock();

	nla_nest_end(nlskb, mca);
	return 0;
}
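/* Describe one mapped rx/tx ring: block/frame geometry for all versions,
 * plus the TPACKET_V3 block retire timeout, private area size and feature
 * bits. Nothing is emitted when no ring is set up.
 */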
static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
			  struct sk_buff *nlskb)
{
	struct packet_diag_ring pdr;

	if (!ring->pg_vec)
		return 0;

	pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
	pdr.pdr_block_nr = ring->pg_vec_len;
	pdr.pdr_frame_size = ring->frame_size;
	pdr.pdr_frame_nr = ring->frame_max + 1;

	if (ver > TPACKET_V2) {
		pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
		pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
		pdr.pdr_features = ring->prb_bdqc.feature_req_word;
	} else {
		pdr.pdr_retire_tmo = 0;
		pdr.pdr_sizeof_priv = 0;
		pdr.pdr_features = 0;
	}

	return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
}
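/* Emit PACKET_DIAG_RX_RING and PACKET_DIAG_TX_RING while holding
 * pg_vec_lock so the ring geometry cannot change under us.
 */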
static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;

	mutex_lock(&po->pg_vec_lock);
	ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
			     PACKET_DIAG_RX_RING, skb);
	if (!ret)
		ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
				     PACKET_DIAG_TX_RING, skb);
	mutex_unlock(&po->pg_vec_lock);

	return ret;
}
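/* Report fanout membership as a PACKET_DIAG_FANOUT u32: group id in the
 * low 16 bits, fanout type in the upper bits.
 */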
static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
{
	int ret = 0;

	mutex_lock(&fanout_mutex);
	if (po->fanout) {
		u32 val;

		val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
		ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
	}
	mutex_unlock(&fanout_mutex);

	return ret;
}
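/* Build one SOCK_DIAG_BY_FAMILY message for @sk. Every PACKET_SHOW_* bit
 * requested in @req adds the matching attribute; if the skb overflows, the
 * partial message is trimmed and -EMSGSIZE tells the caller to stop here.
 */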
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct packet_diag_req *req,
			bool may_report_filterinfo,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct packet_diag_msg *rp;
	struct packet_sock *po = pkt_sk(sk);

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
	if (!nlh)
		return -EMSGSIZE;

	rp = nlmsg_data(nlh);
	rp->pdiag_family = AF_PACKET;
	rp->pdiag_type = sk->sk_type;
	rp->pdiag_num = ntohs(po->num);
	rp->pdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rp->pdiag_cookie);

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    pdiag_put_info(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    nla_put_u32(skb, PACKET_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
	    pdiag_put_mclist(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
	    pdiag_put_rings_cfg(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
	    pdiag_put_fanout(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
	    sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
				     PACKET_DIAG_FILTER))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
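/* Netlink dump callback: walk this namespace's packet socket list, skip
 * the cb->args[0] sockets reported by previous passes, and stop as soon as
 * a message no longer fits so the dump can be resumed.
 */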
static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int num = 0, s_num = cb->args[0];
	struct packet_diag_req *req;
	struct net *net;
	struct sock *sk;
	bool may_report_filterinfo;

	net = sock_net(skb->sk);
	req = nlmsg_data(cb->nlh);
	may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);

	mutex_lock(&net->packet.sklist_lock);
	sk_for_each(sk, &net->packet.sklist) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num < s_num)
			goto next;

		if (sk_diag_fill(sk, skb, req,
				 may_report_filterinfo,
				 sk_user_ns(NETLINK_CB(cb->skb).sk),
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
				 sock_i_ino(sk)) < 0)
			goto done;
next:
		num++;
	}
done:
	mutex_unlock(&net->packet.sklist_lock);
	cb->args[0] = num;

	return skb->len;
}
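/* sock_diag entry point: validate the request and start the dump. Only
 * NLM_F_DUMP requests are handled, and sdiag_protocol must be zero until
 * protocol filtering is implemented.
 */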
static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct packet_diag_req);
	struct net *net = sock_net(skb->sk);
	struct packet_diag_req *req;

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	req = nlmsg_data(h);
	/* Make it possible to support protocol filtering later */
	if (req->sdiag_protocol)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = packet_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return -EOPNOTSUPP;
}

static const struct sock_diag_handler packet_diag_handler = {
	.family = AF_PACKET,
	.dump = packet_diag_handler_dump,
};

static int __init packet_diag_init(void)
{
	return sock_diag_register(&packet_diag_handler);
}

static void __exit packet_diag_exit(void)
{
	sock_diag_unregister(&packet_diag_handler);
}

module_init(packet_diag_init);
module_exit(packet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);