/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>

#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "en.h"
/* RX completion syndromes reported inline by the hardware for packets that
 * went through the IPSec decrypt offload.
 */
enum {
	MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
	MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
};
47 struct mlx5e_ipsec_rx_metadata {
48 unsigned char reserved;
/* TX syndromes placed in the inline metadata to tell the hardware how to
 * handle the packet: plain crypto offload, or crypto offload combined with
 * TCP LSO segmentation.
 */
enum {
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};
57 struct mlx5e_ipsec_tx_metadata {
58 __be16 mss_inv; /* 1/MSS in 16bit fixed point, only for LSO */
59 __be16 seq; /* LSBs of the first TCP seq, only for LSO */
60 u8 esp_next_proto; /* Next protocol of ESP */
63 struct mlx5e_ipsec_metadata {
64 unsigned char syndrome;
67 /* from FPGA to host, on successful decrypt */
68 struct mlx5e_ipsec_rx_metadata rx;
69 /* from host to FPGA */
70 struct mlx5e_ipsec_tx_metadata tx;
72 /* packet type ID field */
76 #define MAX_LSO_MSS 2048
78 /* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
79 static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];
81 static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
83 return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
86 static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
88 struct mlx5e_ipsec_metadata *mdata;
91 if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
92 return ERR_PTR(-ENOMEM);
94 eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
95 skb->mac_header -= sizeof(*mdata);
96 mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);
98 memmove(skb->data, skb->data + sizeof(*mdata),
101 eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
103 memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
107 static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
109 unsigned int alen = crypto_aead_authsize(x->data);
110 struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
111 struct iphdr *ipv4hdr = ip_hdr(skb);
112 unsigned int trailer_len;
116 ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
120 trailer_len = alen + plen + 2;
122 ret = pskb_trim(skb, skb->len - trailer_len);
125 if (skb->protocol == htons(ETH_P_IP)) {
126 ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
127 ip_send_check(ipv4hdr);
129 ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
135 static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
136 struct mlx5_wqe_eth_seg *eseg, u8 mode,
137 struct xfrm_offload *xo)
142 * SWP: OutL3 InL3 InL4
143 * Pkt: MAC IP ESP IP L4
150 * Offsets are in 2-byte words, counting from start of frame
152 eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
153 if (skb->protocol == htons(ETH_P_IPV6))
154 eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
156 if (mode == XFRM_MODE_TUNNEL) {
157 eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
158 if (xo->proto == IPPROTO_IPV6) {
159 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
160 proto = inner_ipv6_hdr(skb)->nexthdr;
162 proto = inner_ip_hdr(skb)->protocol;
165 eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
166 if (skb->protocol == htons(ETH_P_IPV6))
167 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
172 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
175 eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
180 static void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_offload *xo)
185 /* Place the SN in the IV field */
186 seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
187 iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
188 skb_store_bits(skb, iv_offset, &seqno, 8);
191 static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
192 struct mlx5e_ipsec_metadata *mdata,
193 struct xfrm_offload *xo)
195 struct ip_esp_hdr *esph;
198 if (skb_is_gso(skb)) {
199 /* Add LSO metadata indication */
200 esph = ip_esp_hdr(skb);
201 tcph = inner_tcp_hdr(skb);
202 netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
204 skb->transport_header,
205 skb->inner_network_header,
206 skb->inner_transport_header);
207 netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
208 skb->len, skb_shinfo(skb)->gso_size,
209 ntohs(tcph->source), ntohs(tcph->dest),
210 ntohl(tcph->seq), ntohl(esph->seq_no));
211 mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
212 mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
213 mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
215 mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
217 mdata->content.tx.esp_next_proto = xo->proto;
219 netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
220 mdata->syndrome, mdata->content.tx.esp_next_proto,
221 ntohs(mdata->content.tx.mss_inv),
222 ntohs(mdata->content.tx.seq));
225 struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
226 struct mlx5e_tx_wqe *wqe,
229 struct mlx5e_priv *priv = netdev_priv(netdev);
230 struct xfrm_offload *xo = xfrm_offload(skb);
231 struct mlx5e_ipsec_metadata *mdata;
232 struct xfrm_state *x;
237 if (unlikely(skb->sp->len != 1)) {
238 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
242 x = xfrm_input_state(skb);
244 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
248 if (unlikely(!x->xso.offload_handle ||
249 (skb->protocol != htons(ETH_P_IP) &&
250 skb->protocol != htons(ETH_P_IPV6)))) {
251 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
255 if (!skb_is_gso(skb))
256 if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
257 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
260 mdata = mlx5e_ipsec_add_metadata(skb);
261 if (unlikely(IS_ERR(mdata))) {
262 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
265 mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
266 mlx5e_ipsec_set_iv(skb, xo);
267 mlx5e_ipsec_set_metadata(skb, mdata, xo);
276 static inline struct xfrm_state *
277 mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
278 struct mlx5e_ipsec_metadata *mdata)
280 struct mlx5e_priv *priv = netdev_priv(netdev);
281 struct xfrm_offload *xo;
282 struct xfrm_state *xs;
285 skb->sp = secpath_dup(skb->sp);
286 if (unlikely(!skb->sp)) {
287 atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
291 sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
292 xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
294 atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
298 skb->sp->xvec[skb->sp->len++] = xs;
301 xo = xfrm_offload(skb);
302 xo->flags = CRYPTO_DONE;
303 switch (mdata->syndrome) {
304 case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
305 xo->status = CRYPTO_SUCCESS;
307 case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
308 xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
311 atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
317 struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
320 struct mlx5e_ipsec_metadata *mdata;
321 struct ethhdr *old_eth;
322 struct ethhdr *new_eth;
323 struct xfrm_state *xs;
326 /* Detect inline metadata */
327 if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
329 ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
330 if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
333 /* Use the metadata */
334 mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
335 xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
341 /* Remove the metadata from the buffer */
342 old_eth = (struct ethhdr *)skb->data;
343 new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
344 memmove(new_eth, old_eth, 2 * ETH_ALEN);
345 /* Ethertype is already in its new place */
346 skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
351 bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
352 netdev_features_t features)
354 struct xfrm_state *x;
356 if (skb->sp && skb->sp->len) {
357 x = skb->sp->xvec[0];
358 if (x && x->xso.offload_handle)
364 void mlx5e_ipsec_build_inverse_table(void)
369 /* Calculate 1/x inverse table for use in GSO data path.
370 * Using this table, we provide the IPSec accelerator with the value of
371 * 1/gso_size so that it can infer the position of each segment inside
372 * the GSO, and increment the ESP sequence number, and generate the IV.
373 * The HW needs this value in Q0.16 fixed-point number format
375 mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
376 for (mss = 2; mss < MAX_LSO_MSS; mss++) {
377 mss_inv = div_u64(1ULL << 32, mss) >> 16;
378 mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);