// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
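/* Receive path for the rxe (soft RoCE) driver: validate the headers of an
 * incoming RoCEv2 packet, resolve the destination QP, and hand the packet
 * to the responder (requests) or the completer (responses/acks).
 */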
/* check that QP matches packet opcode type and is in a valid state */
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
			    struct rxe_qp *qp)
{
	unsigned int pkt_type;
	unsigned long flags;

	if (unlikely(!qp->valid))
		return -EINVAL;

	pkt_type = pkt->opcode & 0xe0;

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (unlikely(pkt_type != IB_OPCODE_RC))
			return -EINVAL;
		break;
	case IB_QPT_UC:
		if (unlikely(pkt_type != IB_OPCODE_UC))
			return -EINVAL;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		if (unlikely(pkt_type != IB_OPCODE_UD))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->state_lock, flags);
	if (pkt->mask & RXE_REQ_MASK) {
		if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
			spin_unlock_irqrestore(&qp->state_lock, flags);
			return -EINVAL;
		}
	} else {
		if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
			spin_unlock_irqrestore(&qp->state_lock, flags);
			return -EINVAL;
		}
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	return 0;
}
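/* The two helpers below bump the port counters reported through the port
 * attributes; they saturate at 0xffff rather than wrapping, matching the
 * 16-bit counters in the IBA PortInfo attribute.
 */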
static void set_bad_pkey_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.bad_pkey_cntr = min((u32)0xffff,
				       port->attr.bad_pkey_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}
static void set_qkey_viol_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.qkey_viol_cntr = min((u32)0xffff,
				       port->attr.qkey_viol_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}
static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      u32 qpn, struct rxe_qp *qp)
{
	struct rxe_port *port = &rxe->port;
	u16 pkey = bth_pkey(pkt);

	pkt->pkey_index = 0;

	if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
		set_bad_pkey_cntr(port);
		return -EINVAL;
	}

	if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;

		if (unlikely(deth_qkey(pkt) != qkey)) {
			set_qkey_viol_cntr(port);
			return -EINVAL;
		}
	}

	return 0;
}
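/* check_addr() below only applies to connected QPs (RC/UC): the source and
 * destination IP addresses in the packet must match the primary address
 * vector programmed on the QP. Datagram QPs carry addressing per packet,
 * so they skip this check.
 */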
static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      struct rxe_qp *qp)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
		return 0;

	if (unlikely(pkt->port_num != qp->attr.port_num))
		return -EINVAL;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct in_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
		struct in_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;

		if ((ip_hdr(skb)->daddr != saddr->s_addr) ||
		    (ip_hdr(skb)->saddr != daddr->s_addr))
			return -EINVAL;

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct in6_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
		struct in6_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;

		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr)) ||
		    memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr)))
			return -EINVAL;
	}

	return 0;
}
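/* hdr_check() validates the BTH fields, looks up the destination QP and, on
 * success, stores it in pkt->qp holding a reference that the caller releases
 * on the drop path (see rxe_rcv() below). Multicast packets are not resolved
 * to a QP here; they are fanned out later in rxe_rcv_mcast_pkt().
 */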
static int hdr_check(struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = pkt->rxe;
	struct rxe_port *port = &rxe->port;
	struct rxe_qp *qp = NULL;
	u32 qpn = bth_qpn(pkt);
	int index;
	int err;

	if (unlikely(bth_tver(pkt) != BTH_TVER))
		goto err1;

	if (unlikely(qpn == 0))
		goto err1;

	if (qpn != IB_MULTICAST_QPN) {
		index = (qpn == 1) ? port->qp_gsi_index : qpn;

		qp = rxe_pool_get_index(&rxe->qp_pool, index);
		if (unlikely(!qp))
			goto err1;

		err = check_type_state(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_addr(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_keys(rxe, pkt, qpn, qp);
		if (unlikely(err))
			goto err2;
	} else {
		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0))
			goto err1;
	}

	pkt->qp = qp;
	return 0;

err2:
	rxe_put(qp);
err1:
	return -EINVAL;
}
static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	if (pkt->mask & RXE_REQ_MASK)
		rxe_resp_queue_pkt(pkt->qp, skb);
	else
		rxe_comp_queue_pkt(pkt->qp, skb);
}
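/* Multicast fan-out: deliver one copy of the packet to every QP attached to
 * the multicast group. All but the last QP get an skb_clone(); the original
 * skb goes to the last QP so the common single-member case needs no copy.
 */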
static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_mcg *mcg;
	struct rxe_mca *mca;
	struct rxe_qp *qp;
	union ib_gid dgid;
	int err;

	if (skb->protocol == htons(ETH_P_IP))
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));

	/* lookup mcast group corresponding to mgid, takes a ref */
	mcg = rxe_lookup_mcg(rxe, &dgid);
	if (!mcg)
		goto drop;	/* mcast group not registered */

	spin_lock_bh(&rxe->mcg_lock);

	/* this is unreliable datagram service so we let
	 * failures to deliver a multicast packet to a
	 * single QP happen and just move on and try
	 * the rest of them on the list
	 */
	list_for_each_entry(mca, &mcg->qp_list, qp_list) {
		qp = mca->qp;

		/* validate qp for incoming packet */
		err = check_type_state(rxe, pkt, qp);
		if (err)
			continue;

		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
		if (err)
			continue;

		/* for all but the last QP create a new clone of the
		 * skb and pass to the QP. Pass the original skb to
		 * the last QP in the list.
		 */
		if (mca->qp_list.next != &mcg->qp_list) {
			struct sk_buff *cskb;
			struct rxe_pkt_info *cpkt;

			cskb = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!cskb))
				continue;

			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
				kfree_skb(cskb);
				break;
			}

			cpkt = SKB_TO_PKT(cskb);
			cpkt->qp = qp;
			rxe_get(qp);
			rxe_rcv_pkt(cpkt, cskb);
		} else {
			pkt->qp = qp;
			rxe_get(qp);
			rxe_rcv_pkt(pkt, skb);
			skb = NULL;	/* mark consumed */
		}
	}

	spin_unlock_bh(&rxe->mcg_lock);

	kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

	if (likely(!skb))
		return;

	/* This only occurs if one of the checks fails on the last
	 * QP in the list above
	 */
drop:
	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
}
/**
 * rxe_chk_dgid - validate destination IP address
 * @rxe: rxe device that received packet
 * @skb: the received packet buffer
 *
 * Accept any loopback packet. Otherwise extract the destination IP address
 * from the packet and accept it if it is a multicast address or if it
 * matches an SGID table entry.
 *
 * Return: 0 if the destination is acceptable, a negative errno otherwise.
 */
static int rxe_chk_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	const struct ib_gid_attr *gid_attr;
	union ib_gid dgid;
	union ib_gid *pdgid;

	if (pkt->mask & RXE_LOOPBACK_MASK)
		return 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
		pdgid = &dgid;
	} else {
		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
	}

	if (rdma_is_multicast_addr((struct in6_addr *)pdgid))
		return 0;

	gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
					 IB_GID_TYPE_ROCE_UDP_ENCAP,
					 1, skb->dev);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	rdma_put_gid_attr(gid_attr);
	return 0;
}
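/* Note: the exact call site is outside this file; rxe_rcv() is expected to
 * be invoked from the rxe UDP tunnel receive handler (rxe_net.c) after the
 * outer UDP/IP encapsulation has been stripped.
 */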
/* rxe_rcv is called from the interface driver */
void rxe_rcv(struct sk_buff *skb)
{
	int err;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_dev *rxe = pkt->rxe;

	if (unlikely(skb->len < RXE_BTH_BYTES))
		goto drop;

	if (rxe_chk_dgid(rxe, skb) < 0)
		goto drop;

	pkt->opcode = bth_opcode(pkt);
	pkt->psn = bth_psn(pkt);
	pkt->qp = NULL;
	pkt->mask |= rxe_opcode[pkt->opcode].mask;

	if (unlikely(skb->len < header_size(pkt)))
		goto drop;

	err = hdr_check(pkt);
	if (unlikely(err))
		goto drop;

	err = rxe_icrc_check(skb, pkt);
	if (unlikely(err))
		goto drop;

	rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);

	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
		rxe_rcv_mcast_pkt(rxe, skb);
	else
		rxe_rcv_pkt(pkt, skb);

	return;

drop:
	if (pkt->qp)
		rxe_put(pkt->qp);

	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
}