2 * Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/module.h>
12 #include <linux/netlink.h>
13 #include <linux/netfilter.h>
14 #include <linux/netfilter/nf_tables.h>
15 #include <net/netfilter/nf_tables.h>
16 #include <net/netfilter/nft_reject.h>
17 #include <net/netfilter/nf_tables_bridge.h>
18 #include <net/netfilter/ipv4/nf_reject.h>
19 #include <net/netfilter/ipv6/nf_reject.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/netfilter_bridge.h>
24 #include <linux/netfilter_ipv6.h>
25 #include "../br_private.h"
/* Build an Ethernet header on the reject skb (nskb) so it can be sent back
 * towards the originator of oldskb: MAC source/destination are swapped and
 * the EtherType is copied over.
 * NOTE(review): this extract is missing lines (second parameter, braces,
 * local declarations) -- code annotated as-is; confirm against full file.
 */
27 static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
32 eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
33 skb_reset_mac_header(nskb);
/* Swap MACs: the reply's source is the original destination and vice versa. */
34 ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
35 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
36 eth->h_proto = eth_hdr(oldskb)->h_proto;
/* Leave nskb->data pointing at the network header again after the push. */
37 skb_pull(nskb, ETH_HLEN);
/* Preserve the (hw-accelerated) VLAN tag so the reject packet is emitted
 * in the same VLAN as the packet that triggered it.
 */
39 if (skb_vlan_tag_present(oldskb)) {
40 u16 vid = skb_vlan_tag_get(oldskb);
42 __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid);
46 /* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
47 * or the bridge port (NF_BRIDGE PREROUTING).
/* Send a TCP RST in reply to an IPv4 TCP packet seen on the bridge.
 * The RST is built from scratch and delivered directly out of the bridge
 * port (br_deliver) rather than through the IP stack.
 * NOTE(review): interior lines (locals, error returns, braces) are missing
 * from this extract; annotated as-is.
 */
49 static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
50 const struct net_device *dev,
55 const struct tcphdr *oth;
/* Sanity-check the IPv4 header before trusting any field in it. */
58 if (!nft_bridge_iphdr_validate(oldskb))
/* Locate the offending TCP header; presumably returns NULL for non-TCP
 * or already-RST packets (no RST-on-RST) -- confirm against helper.
 */
61 oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
65 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
66 LL_MAX_HEADER, GFP_ATOMIC);
/* Reserve headroom for link-layer headers pushed later. */
70 skb_reserve(nskb, LL_MAX_HEADER);
71 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
72 sysctl_ip_default_ttl);
73 nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
/* Finalize the IP header now that the full length is known. */
74 niph->ttl = sysctl_ip_default_ttl;
75 niph->tot_len = htons(nskb->len);
78 nft_reject_br_push_etherhdr(oldskb, nskb);
/* Bypass the IP stack: hand the frame straight to the bridge port. */
80 br_deliver(br_port_get_rcu(dev), nskb);
/* Send an ICMPv4 destination-unreachable message in reply to oldskb,
 * embedding (part of) the original packet as the ICMP payload, and deliver
 * it directly via the bridge port.
 * NOTE(review): interior lines (locals such as len/proto/csum/payload,
 * error returns, braces) are missing from this extract; annotated as-is.
 */
83 static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
84 const struct net_device *dev,
89 struct icmphdr *icmph;
/* Refuse to reply to packets with a known-bad checksum or a malformed
 * IPv4 header.
 */
95 if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
98 /* IP header checks: fragment. */
99 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
102 /* RFC says return as much as we can without exceeding 576 bytes. */
103 len = min_t(unsigned int, 536, oldskb->len);
105 if (!pskb_may_pull(oldskb, len))
/* Trim any padding beyond the IP total length before checksumming. */
108 if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
/* Only TCP/UDP checksums can be verified here; other protocols keep the
 * default proto value -- confirm initialization in the missing lines.
 */
111 if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
112 ip_hdr(oldskb)->protocol == IPPROTO_UDP)
113 proto = ip_hdr(oldskb)->protocol;
/* Do not generate ICMP errors for packets that fail checksum validation. */
117 if (!skb_csum_unnecessary(oldskb) &&
118 nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
121 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
122 LL_MAX_HEADER + len, GFP_ATOMIC);
126 skb_reserve(nskb, LL_MAX_HEADER);
127 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
128 sysctl_ip_default_ttl);
/* Append the ICMP header after the freshly-built IP header. */
130 skb_reset_transport_header(nskb);
131 icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
132 memset(icmph, 0, sizeof(*icmph));
133 icmph->type = ICMP_DEST_UNREACH;
/* Copy the quoted original packet (starting at its IP header). */
136 payload = skb_put(nskb, len);
137 memcpy(payload, skb_network_header(oldskb), len);
/* ICMP checksum covers the ICMP header plus the quoted payload. */
139 csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
140 icmph->checksum = csum_fold(csum);
142 niph->tot_len = htons(nskb->len);
145 nft_reject_br_push_etherhdr(oldskb, nskb);
147 br_deliver(br_port_get_rcu(dev), nskb);
/* IPv6 counterpart of nft_reject_br_send_v4_tcp_reset(): build a TCP RST
 * in reply to an IPv6 TCP packet and deliver it via the bridge port.
 * NOTE(review): interior lines (error returns, braces, _oth declaration)
 * are missing from this extract; annotated as-is.
 */
150 static void nft_reject_br_send_v6_tcp_reset(struct net *net,
151 struct sk_buff *oldskb,
152 const struct net_device *dev,
155 struct sk_buff *nskb;
156 const struct tcphdr *oth;
158 unsigned int otcplen;
159 struct ipv6hdr *nip6h;
/* Sanity-check the IPv6 header before trusting any field in it. */
161 if (!nft_bridge_ip6hdr_validate(oldskb))
/* Locate the offending TCP header; otcplen receives its length. */
164 oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
168 nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
169 LL_MAX_HEADER, GFP_ATOMIC);
/* Reserve headroom for link-layer headers pushed later. */
173 skb_reserve(nskb, LL_MAX_HEADER);
/* Hop limit is taken from the per-netns "all" devconf. */
174 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
175 net->ipv6.devconf_all->hop_limit);
176 nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
/* IPv6 payload_len excludes the fixed IPv6 header itself. */
177 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
179 nft_reject_br_push_etherhdr(oldskb, nskb);
181 br_deliver(br_port_get_rcu(dev), nskb);
/* Verify the transport checksum of an IPv6 packet before we generate an
 * ICMPv6 error for it; returns true when the checksum is acceptable.
 * NOTE(review): interior lines (thoff/fo declarations, early returns,
 * braces) are missing from this extract; annotated as-is.
 */
184 static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
186 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
189 u8 proto = ip6h->nexthdr;
/* Hardware already validated the checksum -- nothing more to do. */
194 if (skb_csum_unnecessary(skb))
/* Trim trailing padding beyond the declared IPv6 payload length. */
197 if (ip6h->payload_len &&
198 pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
/* Reload the header pointer: pskb_trim_rcsum may have reallocated. */
201 ip6h = ipv6_hdr(skb);
/* Walk extension headers to find the transport offset and fragment info. */
202 thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
/* Reject bad offsets and non-first fragments (nonzero fragment offset). */
203 if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
206 return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
/* Send an ICMPv6 destination-unreachable (with the given code) in reply to
 * oldskb, quoting as much of the original packet as fits, and deliver it
 * directly via the bridge port.
 * NOTE(review): interior lines (len/payload declarations, error returns,
 * braces, trailing csum_ipv6_magic arguments) are missing from this
 * extract; annotated as-is.
 */
209 static void nft_reject_br_send_v6_unreach(struct net *net,
210 struct sk_buff *oldskb,
211 const struct net_device *dev,
214 struct sk_buff *nskb;
215 struct ipv6hdr *nip6h;
216 struct icmp6hdr *icmp6h;
220 if (!nft_bridge_ip6hdr_validate(oldskb))
223 /* Include "As much of invoking packet as possible without the ICMPv6
224 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
/* 1280 (min IPv6 MTU) - 40 (IPv6 hdr) - 8 (ICMPv6 hdr) - 12 = 1220; the
 * exact derivation is not visible here -- confirm against full file.
 */
226 len = min_t(unsigned int, 1220, oldskb->len);
228 if (!pskb_may_pull(oldskb, len))
/* Do not generate ICMPv6 errors for packets with bad checksums. */
231 if (!reject6_br_csum_ok(oldskb, hook))
/* NOTE(review): sizeof(struct iphdr) in an IPv6 path looks like it should
 * be sizeof(struct ipv6hdr) (20 vs 40 bytes); allocator slack likely hides
 * the shortfall -- verify against upstream.
 */
234 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
235 LL_MAX_HEADER + len, GFP_ATOMIC);
239 skb_reserve(nskb, LL_MAX_HEADER);
240 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
241 net->ipv6.devconf_all->hop_limit);
/* Append the ICMPv6 header after the freshly-built IPv6 header. */
243 skb_reset_transport_header(nskb);
244 icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
245 memset(icmp6h, 0, sizeof(*icmp6h));
246 icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
247 icmp6h->icmp6_code = code;
/* Copy the quoted original packet (starting at its IPv6 header). */
249 payload = skb_put(nskb, len);
250 memcpy(payload, skb_network_header(oldskb), len);
251 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
/* ICMPv6 checksum includes the IPv6 pseudo-header (csum_ipv6_magic);
 * the remaining arguments are not visible in this extract.
 */
253 icmp6h->icmp6_cksum =
254 csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
255 nskb->len - sizeof(struct ipv6hdr),
258 nskb->len - sizeof(struct ipv6hdr),
261 nft_reject_br_push_etherhdr(oldskb, nskb);
263 br_deliver(br_port_get_rcu(dev), nskb);
/* nftables eval hook for the bridge "reject" expression: dispatch on the
 * frame's EtherType and the configured reject type, send the matching
 * reject packet, then drop the original.
 * NOTE(review): interior lines (break statements, goto out, closing braces,
 * some call arguments) are missing from this extract; annotated as-is.
 */
266 static void nft_reject_bridge_eval(const struct nft_expr *expr,
267 struct nft_regs *regs,
268 const struct nft_pktinfo *pkt)
270 struct nft_reject *priv = nft_expr_priv(expr);
271 const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
/* Never reject broadcast/multicast frames -- replying to those would be
 * wrong and potentially amplifying.
 */
273 if (is_broadcast_ether_addr(dest) ||
274 is_multicast_ether_addr(dest))
277 switch (eth_hdr(pkt->skb)->h_proto) {
278 case htons(ETH_P_IP):
279 switch (priv->type) {
280 case NFT_REJECT_ICMP_UNREACH:
281 nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
285 case NFT_REJECT_TCP_RST:
286 nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in,
289 case NFT_REJECT_ICMPX_UNREACH:
/* ICMPX: abstract code translated to the v4-specific ICMP code. */
290 nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
292 nft_reject_icmp_code(priv->icmp_code));
296 case htons(ETH_P_IPV6):
297 switch (priv->type) {
298 case NFT_REJECT_ICMP_UNREACH:
299 nft_reject_br_send_v6_unreach(pkt->net, pkt->skb,
303 case NFT_REJECT_TCP_RST:
304 nft_reject_br_send_v6_tcp_reset(pkt->net, pkt->skb,
307 case NFT_REJECT_ICMPX_UNREACH:
/* ICMPX: abstract code translated to the v6-specific ICMPv6 code. */
308 nft_reject_br_send_v6_unreach(pkt->net, pkt->skb,
310 nft_reject_icmpv6_code(priv->icmp_code));
315 /* No explicit way to reject this protocol, drop it. */
/* The original packet is always dropped, whether or not a reject packet
 * was generated.
 */
319 regs->verdict.code = NF_DROP;
/* Restrict the bridge reject expression to hooks where generating a reply
 * makes sense: prerouting and input only. Returns 0 or a negative errno
 * from nft_chain_validate_hooks().
 */
322 static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
323 const struct nft_expr *expr,
324 const struct nft_data **data)
326 return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
327 (1 << NF_BR_LOCAL_IN));
/* Parse netlink attributes for the reject expression: mandatory
 * NFTA_REJECT_TYPE plus, for unreachable types, NFTA_REJECT_ICMP_CODE.
 * NOTE(review): interior lines (err/icmp_code declarations, error returns,
 * default case, closing braces) are missing from this extract.
 */
330 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
331 const struct nft_expr *expr,
332 const struct nlattr * const tb[])
334 struct nft_reject *priv = nft_expr_priv(expr);
/* Re-check hook placement at init time as well as at validate time. */
337 err = nft_reject_bridge_validate(ctx, expr, NULL);
341 if (tb[NFTA_REJECT_TYPE] == NULL)
344 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
345 switch (priv->type) {
346 case NFT_REJECT_ICMP_UNREACH:
347 case NFT_REJECT_ICMPX_UNREACH:
/* Both unreachable variants require an explicit ICMP code. */
348 if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
351 icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
/* Abstract ICMPX codes have a bounded range; plain ICMP codes do not
 * get range-checked here.
 */
352 if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
353 icmp_code > NFT_REJECT_ICMPX_MAX)
356 priv->icmp_code = icmp_code;
/* TCP RST takes no extra attributes. */
358 case NFT_REJECT_TCP_RST:
/* Dump the expression's configuration back to userspace over netlink:
 * always the reject type, plus the ICMP code for unreachable types.
 * NOTE(review): the tail of this function (TCP_RST/default cases, return 0,
 * nla_put_failure label) is missing from this extract.
 */
366 static int nft_reject_bridge_dump(struct sk_buff *skb,
367 const struct nft_expr *expr)
369 const struct nft_reject *priv = nft_expr_priv(expr);
371 if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
372 goto nla_put_failure;
374 switch (priv->type) {
375 case NFT_REJECT_ICMP_UNREACH:
376 case NFT_REJECT_ICMPX_UNREACH:
377 if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
378 goto nla_put_failure;
/* Forward declaration: the ops table below needs the type, and the type
 * needs the ops table.
 */
390 static struct nft_expr_type nft_reject_bridge_type;
/* Expression callbacks wired into the nftables core. */
391 static const struct nft_expr_ops nft_reject_bridge_ops = {
392 .type = &nft_reject_bridge_type,
393 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
394 .eval = nft_reject_bridge_eval,
395 .init = nft_reject_bridge_init,
396 .dump = nft_reject_bridge_dump,
397 .validate = nft_reject_bridge_validate,
/* Registration record for the "reject" expression in the bridge family;
 * reuses the shared nft_reject_policy from net/netfilter/nft_reject.c.
 * NOTE(review): the .name initializer is not visible in this extract.
 */
400 static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
401 .family = NFPROTO_BRIDGE,
403 .ops = &nft_reject_bridge_ops,
404 .policy = nft_reject_policy,
405 .maxattr = NFTA_REJECT_MAX,
406 .owner = THIS_MODULE,
/* Module entry point: register the expression type with the nftables core. */
409 static int __init nft_reject_bridge_module_init(void)
411 return nft_register_expr(&nft_reject_bridge_type);
/* Module exit point: unregister the expression type. */
414 static void __exit nft_reject_bridge_module_exit(void)
416 nft_unregister_expr(&nft_reject_bridge_type);
/* Standard module registration and metadata; the alias lets the kernel
 * autoload this module when a bridge-family "reject" expression is used.
 */
419 module_init(nft_reject_bridge_module_init);
420 module_exit(nft_reject_bridge_module_exit);
422 MODULE_LICENSE("GPL");
423 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
424 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");