/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"

static LIST_HEAD(rxe_dev_list);
static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
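
/* Look up the rxe device, if any, that is stacked on a given network
 * device. Walks the global device list under dev_list_lock.
 */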
struct rxe_dev *net_to_rxe(struct net_device *ndev)
{
	struct rxe_dev *rxe;
	struct rxe_dev *found = NULL;

	spin_lock_bh(&dev_list_lock);
	list_for_each_entry(rxe, &rxe_dev_list, list) {
		if (rxe->ndev == ndev) {
			found = rxe;
			break;
		}
	}
	spin_unlock_bh(&dev_list_lock);

	return found;
}

struct rxe_dev *get_rxe_by_name(const char *name)
{
	struct rxe_dev *rxe;
	struct rxe_dev *found = NULL;

	spin_lock_bh(&dev_list_lock);
	list_for_each_entry(rxe, &rxe_dev_list, list) {
		if (!strcmp(name, rxe->ib_dev.name)) {
			found = rxe;
			break;
		}
	}
	spin_unlock_bh(&dev_list_lock);
	return found;
}
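
/* recv_sockets holds the pair of UDP tunnel sockets (IPv4 and IPv6)
 * that all rxe devices share for RoCEv2 traffic.
 */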
struct rxe_recv_sockets recv_sockets;

struct device *rxe_dma_device(struct rxe_dev *rxe)
{
	struct net_device *ndev;

	ndev = rxe->ndev;

	if (is_vlan_dev(ndev))
		ndev = vlan_dev_real_dev(ndev);

	return ndev->dev.parent;
}
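
/* Join/leave an ethernet multicast group on the underlying netdev,
 * mapping the multicast GID to a link-layer address with the standard
 * IPv6-to-ethernet multicast mapping.
 */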
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_add(rxe->ndev, ll_addr);

	return err;
}

int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_del(rxe->ndev, ll_addr);

	return err;
}
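
/* Resolve an IPv4 route for a source/destination pair, constrained to
 * the port's netdev. Returns NULL if no route exists.
 */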
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
					 struct in_addr *saddr,
					 struct in_addr *daddr)
{
	struct rtable *rt;
	struct flowi4 fl = { { 0 } };

	fl.flowi4_oif = ndev->ifindex;
	memcpy(&fl.saddr, saddr, sizeof(*saddr));
	memcpy(&fl.daddr, daddr, sizeof(*daddr));
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt)) {
		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
		return NULL;
	}

	return &rt->dst;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	struct dst_entry *ndst;
	struct flowi6 fl6 = { { 0 } };

	fl6.flowi6_oif = ndev->ifindex;
	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
	fl6.flowi6_proto = IPPROTO_UDP;

	ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
					       recv_sockets.sk6->sk, &fl6,
					       NULL);
	if (unlikely(IS_ERR(ndst))) {
		pr_err_ratelimited("no route to %pI6\n", daddr);
		return NULL;
	}

	if (unlikely(ndst->error)) {
		pr_err("no route to %pI6\n", daddr);
		goto put;
	}

	return ndst;
put:
	dst_release(ndst);
	return NULL;
}

#else

static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	return NULL;
}

#endif
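
/* For RC QPs try the route cached on the QP's socket first and fall
 * back to a fresh lookup when the cache is empty or stale; for IPv6
 * the dst cookie is stored so the cached route can be revalidated.
 */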
static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
					struct rxe_qp *qp,
					struct rxe_av *av)
{
	struct dst_entry *dst = NULL;

	if (qp_type(qp) == IB_QPT_RC)
		dst = sk_dst_get(qp->sk->sk);

	if (!dst || !dst_check(dst, qp->dst_cookie)) {
		if (dst)
			dst_release(dst);

		if (av->network_type == RDMA_NETWORK_IPV4) {
			struct in_addr *saddr;
			struct in_addr *daddr;

			saddr = &av->sgid_addr._sockaddr_in.sin_addr;
			daddr = &av->dgid_addr._sockaddr_in.sin_addr;
			dst = rxe_find_route4(rxe->ndev, saddr, daddr);
		} else if (av->network_type == RDMA_NETWORK_IPV6) {
			struct in6_addr *saddr6;
			struct in6_addr *daddr6;

			saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
			daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
			dst = rxe_find_route6(rxe->ndev, saddr6, daddr6);
#if IS_ENABLED(CONFIG_IPV6)
			if (dst)
				qp->dst_cookie =
					rt6_get_cookie((struct rt6_info *)dst);
#endif
		}
	}

	return dst;
}
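
/* encap_rcv handler for the RoCEv2 UDP tunnel sockets: locate the rxe
 * device for the receiving netdev, fill in the per-packet info behind
 * the UDP header, and pass the skb on to rxe_rcv().
 */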
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct net_device *ndev = skb->dev;
	struct rxe_dev *rxe = net_to_rxe(ndev);
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	return rxe_rcv(skb);
drop:
	kfree_skb(skb);
	return 0;
}
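
/* Open a kernel UDP socket on the RoCEv2 port and hook up
 * rxe_udp_encap_recv() as its encapsulation receive callback.
 */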
static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
					   bool ipv6)
{
	int err;
	struct socket *sock;
	struct udp_port_cfg udp_cfg = { };
	struct udp_tunnel_sock_cfg tnl_cfg = { };

	if (ipv6) {
		udp_cfg.family = AF_INET6;
		udp_cfg.ipv6_v6only = 1;
	} else {
		udp_cfg.family = AF_INET;
	}

	udp_cfg.local_udp_port = port;

	/* Create UDP socket */
	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0)
		return ERR_PTR(err);

	tnl_cfg.encap_type = 1;
	tnl_cfg.encap_rcv = rxe_udp_encap_recv;

	/* Setup UDP tunnel */
	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

	return sock;
}

void rxe_release_udp_tunnel(struct socket *sk)
{
	if (sk)
		udp_tunnel_sock_release(sk);
}
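
/* The prepare_*_hdr() helpers push the outer UDP, IPv4, or IPv6
 * header onto an outgoing skb in front of the RoCE payload.
 */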
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}

static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     __be32 saddr, __be32 daddr, __u8 proto,
			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	struct iphdr *iph;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, dst_clone(dst));
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version = IPVERSION;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos = tos;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->ttl = ttl;
	__ip_select_ident(dev_net(dst->dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
}

static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     struct in6_addr *saddr, struct in6_addr *daddr,
			     __u8 proto, __u8 prio, __u8 ttl)
{
	struct ipv6hdr *ip6h;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
			    | IPSKB_REROUTED);
	skb_dst_set(skb, dst_clone(dst));

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6_flow_hdr(ip6h, prio, htonl(0));
	ip6h->nexthdr = proto;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;
	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
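
/* Attach the complete IPv4/UDP or IPv6/UDP header stack to an
 * outgoing packet. A packet whose source equals its destination is
 * flagged for loopback; for RC QPs the resolved route is cached on
 * the QP's socket, otherwise it is released.
 */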
static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb, struct rxe_av *av)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	bool xnet = false;
	__be16 df = htons(IP_DF);
	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;

	dst = rxe_find_route(rxe, qp, av);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	if (!memcmp(saddr, daddr, sizeof(*daddr)))
		pkt->mask |= RXE_LOOPBACK_MASK;

	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
			htons(ROCE_V2_UDP_DPORT));

	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_set(qp->sk->sk, dst);
	else
		dst_release(dst);

	return 0;
}

static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb, struct rxe_av *av)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;

	dst = rxe_find_route(rxe, qp, av);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	if (!memcmp(saddr, daddr, sizeof(*daddr)))
		pkt->mask |= RXE_LOOPBACK_MASK;

	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
			htons(ROCE_V2_UDP_DPORT));

	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
			 av->grh.traffic_class,
			 av->grh.hop_limit);

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_set(qp->sk->sk, dst);
	else
		dst_release(dst);

	return 0;
}

int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		struct sk_buff *skb, u32 *crc)
{
	int err = 0;
	struct rxe_av *av = rxe_get_av(pkt);

	if (av->network_type == RDMA_NETWORK_IPV4)
		err = prepare4(rxe, pkt, skb, av);
	else if (av->network_type == RDMA_NETWORK_IPV6)
		err = prepare6(rxe, pkt, skb, av);

	*crc = rxe_icrc_hdr(pkt, skb);

	return err;
}
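
/* skb destructor for transmitted packets: decrement the QP's
 * in-flight skb count and restart the requester task if it was
 * throttled waiting for skbs to drain.
 */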
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rxe_qp *qp = sk->sk_user_data;
	int skb_out = atomic_dec_return(&qp->skb_out);

	if (unlikely(qp->need_req_skb &&
		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
		rxe_run_task(&qp->req.task, 1);

	rxe_drop_ref(qp);
}

int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct rxe_av *av;
	int err;

	av = rxe_get_av(pkt);

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	nskb->destructor = rxe_skb_tx_dtor;
	nskb->sk = pkt->qp->sk->sk;

	rxe_add_ref(pkt->qp);
	atomic_inc(&pkt->qp->skb_out);

	if (av->network_type == RDMA_NETWORK_IPV4) {
		err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
	} else if (av->network_type == RDMA_NETWORK_IPV6) {
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk,
				    nskb);
	} else {
		pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
		atomic_dec(&pkt->qp->skb_out);
		rxe_drop_ref(pkt->qp);
		kfree_skb(nskb);
		return -EINVAL;
	}

	if (unlikely(net_xmit_eval(err))) {
		pr_debug("error sending packet: %d\n", err);
		return -EAGAIN;
	}

	kfree_skb(skb);

	return 0;
}
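
/* Loopback packets skip the IP stack entirely and are fed straight
 * back into the rxe receive path.
 */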
int rxe_loopback(struct sk_buff *skb)
{
	return rxe_rcv(skb);
}

static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
{
	return rxe->port.port_guid == av->grh.dgid.global.interface_id;
}
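
/* Allocate an skb with enough headroom for the ethernet, IP, and UDP
 * headers that will be pushed in front of a paylen-byte payload.
 */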
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt)
{
	unsigned int hdr_len;
	struct sk_buff *skb;

	if (av->network_type == RDMA_NETWORK_IPV4)
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct iphdr);
	else
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct ipv6hdr);

	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(rxe->ndev),
			GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));

	skb->dev = rxe->ndev;
	if (av->network_type == RDMA_NETWORK_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = skb_put(skb, paylen);
	pkt->mask |= RXE_GRH_MASK;

	memset(pkt->hdr, 0, paylen);

	return skb;
}

/*
 * this is required by rxe_cfg to match rxe devices in
 * /sys/class/infiniband up with their underlying ethernet devices
 */
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
	return rxe->ndev->name;
}

enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
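
/* Create a new rxe device on top of a network device, register it
 * with the IB core via rxe_add(), and put it on the global list.
 */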
struct rxe_dev *rxe_net_add(struct net_device *ndev)
{
	int err;
	struct rxe_dev *rxe = NULL;

	rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
	if (!rxe)
		return NULL;

	rxe->ndev = ndev;

	err = rxe_add(rxe, ndev->mtu);
	if (err) {
		ib_dealloc_device(&rxe->ib_dev);
		return NULL;
	}

	spin_lock_bh(&dev_list_lock);
	list_add_tail(&rxe->list, &rxe_dev_list);
	spin_unlock_bh(&dev_list_lock);

	return rxe;
}

void rxe_remove_all(void)
{
	spin_lock_bh(&dev_list_lock);
	while (!list_empty(&rxe_dev_list)) {
		struct rxe_dev *rxe =
			list_first_entry(&rxe_dev_list, struct rxe_dev, list);

		list_del(&rxe->list);
		spin_unlock_bh(&dev_list_lock);
		rxe_remove(rxe);
		spin_lock_bh(&dev_list_lock);
	}
	spin_unlock_bh(&dev_list_lock);
}
EXPORT_SYMBOL(rxe_remove_all);
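
/* Deliver a port state change event for port 1 to the IB core; rxe
 * devices always expose exactly one port.
 */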
static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;
	ev.event = event;

	ib_dispatch_event(&ev);
}

/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_ACTIVE;
	port->attr.phys_state = IB_PHYS_STATE_LINK_UP;

	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
	pr_info("set %s active\n", rxe->ib_dev.name);
}

/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_DOWN;
	port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;

	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
	pr_info("set %s down\n", rxe->ib_dev.name);
}
static int rxe_notify(struct notifier_block *not_blk,
		      unsigned long event,
		      void *arg)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
	struct rxe_dev *rxe = net_to_rxe(ndev);

	if (!rxe)
		goto out;

	switch (event) {
	case NETDEV_UNREGISTER:
		list_del(&rxe->list);
		rxe_remove(rxe);
		break;
	case NETDEV_UP:
		rxe_port_up(rxe);
		break;
	case NETDEV_DOWN:
		rxe_port_down(rxe);
		break;
	case NETDEV_CHANGEMTU:
		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
		rxe_set_mtu(rxe, ndev->mtu);
		break;
	case NETDEV_CHANGE:
		if (netif_running(ndev) && netif_carrier_ok(ndev))
			rxe_port_up(rxe);
		else
			rxe_port_down(rxe);
		break;
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	default:
		pr_info("ignoring netdev event = %ld for %s\n",
			event, ndev->name);
		break;
	}
out:
	return NOTIFY_OK;
}

struct notifier_block rxe_net_notifier = {
	.notifier_call = rxe_notify,
};
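
/* Create the shared IPv4 and IPv6 UDP tunnel sockets. A missing IPv6
 * stack (EAFNOSUPPORT) is tolerated so the driver still loads on
 * IPv4-only systems.
 */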
static int rxe_net_ipv4_init(void)
{
	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
				htons(ROCE_V2_UDP_DPORT), false);
	if (IS_ERR(recv_sockets.sk4)) {
		recv_sockets.sk4 = NULL;
		pr_err("Failed to create IPv4 UDP tunnel\n");
		return -1;
	}

	return 0;
}

static int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT), true);
	if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) {
		recv_sockets.sk6 = NULL;
		pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n");
		return 0;
	}

	if (IS_ERR(recv_sockets.sk6)) {
		recv_sockets.sk6 = NULL;
		pr_err("Failed to create IPv6 UDP tunnel\n");
		return -1;
	}
#endif
	return 0;
}

void rxe_net_exit(void)
{
	rxe_release_udp_tunnel(recv_sockets.sk6);
	rxe_release_udp_tunnel(recv_sockets.sk4);
	unregister_netdevice_notifier(&rxe_net_notifier);
}
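
/* Open the tunnel sockets, then register the netdevice notifier; on
 * failure anything opened so far is torn down via rxe_net_exit().
 */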
int rxe_net_init(void)
{
	int err;

	recv_sockets.sk6 = NULL;

	err = rxe_net_ipv4_init();
	if (err)
		return err;
	err = rxe_net_ipv6_init();
	if (err)
		goto err_out;
	err = register_netdevice_notifier(&rxe_net_notifier);
	if (err) {
		pr_err("Failed to register netdev notifier\n");
		goto err_out;
	}

	return 0;
err_out:
	rxe_net_exit();
	return err;
}