// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/netlink.h>
12 #include <linux/netfilter.h>
13 #include <linux/if_arp.h>
14 #include <linux/netfilter/nf_tables.h>
15 #include <net/netfilter/nf_tables_core.h>
16 #include <net/netfilter/nf_tables_offload.h>
17 #include <net/netfilter/nf_tables.h>
23 enum nft_cmp_ops op:8;
26 void nft_cmp_eval(const struct nft_expr *expr,
27 struct nft_regs *regs,
28 const struct nft_pktinfo *pkt)
30 const struct nft_cmp_expr *priv = nft_expr_priv(expr);
33 d = memcmp(®s->data[priv->sreg], &priv->data, priv->len);
63 regs->verdict.code = NFT_BREAK;
66 static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
67 [NFTA_CMP_SREG] = { .type = NLA_U32 },
68 [NFTA_CMP_OP] = { .type = NLA_U32 },
69 [NFTA_CMP_DATA] = { .type = NLA_NESTED },
72 static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
73 const struct nlattr * const tb[])
75 struct nft_cmp_expr *priv = nft_expr_priv(expr);
76 struct nft_data_desc desc = {
77 .type = NFT_DATA_VALUE,
78 .size = sizeof(priv->data),
82 err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
86 err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
90 priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
95 static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
97 const struct nft_cmp_expr *priv = nft_expr_priv(expr);
99 if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
100 goto nla_put_failure;
101 if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
102 goto nla_put_failure;
104 if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
105 NFT_DATA_VALUE, priv->len) < 0)
106 goto nla_put_failure;
113 union nft_cmp_offload_data {
119 static void nft_payload_n2h(union nft_cmp_offload_data *data,
120 const u8 *val, u32 len)
124 data->val16 = ntohs(*((u16 *)val));
127 data->val32 = ntohl(*((u32 *)val));
130 data->val64 = be64_to_cpu(*((u64 *)val));
138 static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
139 struct nft_flow_rule *flow,
140 const struct nft_cmp_expr *priv)
142 struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
143 union nft_cmp_offload_data _data, _datamask;
144 u8 *mask = (u8 *)&flow->match.mask;
145 u8 *key = (u8 *)&flow->match.key;
148 if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
151 if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
152 nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
153 nft_payload_n2h(&_datamask, (u8 *)®->mask, reg->len);
155 datamask = (u8 *)&_datamask;
157 data = (u8 *)&priv->data;
158 datamask = (u8 *)®->mask;
161 memcpy(key + reg->offset, data, reg->len);
162 memcpy(mask + reg->offset, datamask, reg->len);
164 flow->match.dissector.used_keys |= BIT(reg->key);
165 flow->match.dissector.offset[reg->key] = reg->base_offset;
167 if (reg->key == FLOW_DISSECTOR_KEY_META &&
168 reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
169 nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
172 nft_offload_update_dependency(ctx, &priv->data, reg->len);
/* Offload callback for the generic cmp ops: forward to the common helper. */
static int nft_cmp_offload(struct nft_offload_ctx *ctx,
			   struct nft_flow_rule *flow,
			   const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	return __nft_cmp_offload(ctx, flow, priv);
}
186 static const struct nft_expr_ops nft_cmp_ops = {
187 .type = &nft_cmp_type,
188 .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
189 .eval = nft_cmp_eval,
190 .init = nft_cmp_init,
191 .dump = nft_cmp_dump,
192 .reduce = NFT_REDUCE_READONLY,
193 .offload = nft_cmp_offload,
196 static int nft_cmp_fast_init(const struct nft_ctx *ctx,
197 const struct nft_expr *expr,
198 const struct nlattr * const tb[])
200 struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
201 struct nft_data data;
202 struct nft_data_desc desc = {
203 .type = NFT_DATA_VALUE,
204 .size = sizeof(data),
208 err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
212 err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
216 desc.len *= BITS_PER_BYTE;
218 priv->mask = nft_cmp_fast_mask(desc.len);
219 priv->data = data.data[0] & priv->mask;
220 priv->len = desc.len;
221 priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
225 static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
226 struct nft_flow_rule *flow,
227 const struct nft_expr *expr)
229 const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
230 struct nft_cmp_expr cmp = {
237 .len = priv->len / BITS_PER_BYTE,
238 .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
241 return __nft_cmp_offload(ctx, flow, &cmp);
244 static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
246 const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
247 enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
248 struct nft_data data;
250 if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
251 goto nla_put_failure;
252 if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
253 goto nla_put_failure;
255 data.data[0] = priv->data;
256 if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
257 NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
258 goto nla_put_failure;
265 const struct nft_expr_ops nft_cmp_fast_ops = {
266 .type = &nft_cmp_type,
267 .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
268 .eval = NULL, /* inlined */
269 .init = nft_cmp_fast_init,
270 .dump = nft_cmp_fast_dump,
271 .reduce = NFT_REDUCE_READONLY,
272 .offload = nft_cmp_fast_offload,
275 static u32 nft_cmp_mask(u32 bitlen)
277 return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen));
280 static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen)
282 int len = bitlen / BITS_PER_BYTE;
283 int i, words = len / sizeof(u32);
285 for (i = 0; i < words; i++) {
286 data->data[i] = 0xffffffff;
287 bitlen -= sizeof(u32) * BITS_PER_BYTE;
290 if (len % sizeof(u32))
291 data->data[i++] = nft_cmp_mask(bitlen);
297 static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
298 const struct nft_expr *expr,
299 const struct nlattr * const tb[])
301 struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
302 struct nft_data_desc desc = {
303 .type = NFT_DATA_VALUE,
304 .size = sizeof(priv->data),
308 err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
312 err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
316 nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE);
317 priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
318 priv->len = desc.len;
323 static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx,
324 struct nft_flow_rule *flow,
325 const struct nft_expr *expr)
327 const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
328 struct nft_cmp_expr cmp = {
332 .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
335 return __nft_cmp_offload(ctx, flow, &cmp);
338 static int nft_cmp16_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
340 const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
341 enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
343 if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
344 goto nla_put_failure;
345 if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
346 goto nla_put_failure;
348 if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
349 NFT_DATA_VALUE, priv->len) < 0)
350 goto nla_put_failure;
358 const struct nft_expr_ops nft_cmp16_fast_ops = {
359 .type = &nft_cmp_type,
360 .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)),
361 .eval = NULL, /* inlined */
362 .init = nft_cmp16_fast_init,
363 .dump = nft_cmp16_fast_dump,
364 .reduce = NFT_REDUCE_READONLY,
365 .offload = nft_cmp16_fast_offload,
368 static const struct nft_expr_ops *
369 nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
371 struct nft_data data;
372 struct nft_data_desc desc = {
373 .type = NFT_DATA_VALUE,
374 .size = sizeof(data),
380 if (tb[NFTA_CMP_SREG] == NULL ||
381 tb[NFTA_CMP_OP] == NULL ||
382 tb[NFTA_CMP_DATA] == NULL)
383 return ERR_PTR(-EINVAL);
385 op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
395 return ERR_PTR(-EINVAL);
398 err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
402 sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
404 if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
405 if (desc.len <= sizeof(u32))
406 return &nft_cmp_fast_ops;
407 else if (desc.len <= sizeof(data) &&
408 ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) ||
409 (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0)))
410 return &nft_cmp16_fast_ops;
415 struct nft_expr_type nft_cmp_type __read_mostly = {
417 .select_ops = nft_cmp_select_ops,
418 .policy = nft_cmp_policy,
419 .maxattr = NFTA_CMP_MAX,
420 .owner = THIS_MODULE,