/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Monitoring SMC transport protocol sockets
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/init.h>
15 #include <linux/sock_diag.h>
16 #include <linux/inet_diag.h>
17 #include <linux/smc_diag.h>
18 #include <net/netlink.h>
/*
 * smc_gid_be16_convert() - render a raw 16-byte GID as eight
 * colon-separated big-endian 16-bit hex groups (IPv6-address style)
 * into @buf.  The format string implies @buf must hold at least
 * 40 bytes (8 * 4 hex digits + 7 colons + NUL).
 * NOTE(review): the function's braces are not visible in this extract.
 */
24 static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
26 sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
27 be16_to_cpu(((__be16 *)gid_raw)[0]),
28 be16_to_cpu(((__be16 *)gid_raw)[1]),
29 be16_to_cpu(((__be16 *)gid_raw)[2]),
30 be16_to_cpu(((__be16 *)gid_raw)[3]),
31 be16_to_cpu(((__be16 *)gid_raw)[4]),
32 be16_to_cpu(((__be16 *)gid_raw)[5]),
33 be16_to_cpu(((__be16 *)gid_raw)[6]),
34 be16_to_cpu(((__be16 *)gid_raw)[7]));
/*
 * smc_diag_msg_common_fill() - fill the inet_diag-style header of diag
 * record @r from SMC socket @sk: family, socket cookie, local/remote
 * ports, bound device index and addresses, all taken from the internal
 * CLC TCP socket (smc->clcsock->sk).  IPv4 fields are reported for
 * SMCPROTO_SMC; for SMCPROTO_SMC6 (when IPv6 is enabled) the full
 * 128-bit addresses are copied instead.
 * NOTE(review): closing braces of the if/else, the #endif, and the
 * function's braces are missing from this extract.
 */
37 static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
39 struct smc_sock *smc = smc_sk(sk);
/* start from a zeroed record so any field not set below reads as 0 */
41 memset(r, 0, sizeof(*r));
42 r->diag_family = sk->sk_family;
43 sock_diag_save_cookie(sk, r->id.idiag_cookie);
/* sk_num is host order and must be converted; sk_dport is already BE */
46 r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
47 r->id.idiag_dport = smc->clcsock->sk->sk_dport;
48 r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
/* IPv4 SMC socket: report the 32-bit source/destination addresses */
49 if (sk->sk_protocol == SMCPROTO_SMC) {
50 r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
51 r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
52 #if IS_ENABLED(CONFIG_IPV6)
53 } else if (sk->sk_protocol == SMCPROTO_SMC6) {
54 memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
55 sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
56 memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
57 sizeof(smc->clcsock->sk->sk_v6_daddr));
/*
 * smc_diag_msg_attrs_fill() - append the SMC_DIAG_SHUTDOWN netlink
 * attribute for @sk to @skb and record the user-namespace-translated
 * owner uid plus the inode number of @sk in @r.
 * NOTE(review): return statements and braces are missing from this
 * extract — presumably non-zero is returned when nla_put_u8() fails
 * and 0 on success; confirm against the full file.
 */
62 static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
63 struct smc_diag_msg *r,
64 struct user_namespace *user_ns)
66 if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
69 r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
70 r->diag_inode = sock_i_ino(sk);
/*
 * __smc_diag_dump() - emit one netlink diag record for SMC socket @sk
 * into @skb.  Fills the common smc_diag_msg header, records the diag
 * mode (TCP fallback / SMC-D / SMC-R) and fallback info, then appends
 * the optional SMC_DIAG_CONNINFO, SMC_DIAG_LGRINFO and SMC_DIAG_DMBINFO
 * attributes when the corresponding bits are set in @req->diag_ext.
 * NOTE(review): this extract is missing the tail of the parameter list,
 * the declaration of @nlh, braces, the error gotos/labels and the
 * success return — compare against the full file before relying on
 * control flow; nlmsg_cancel() at the end is clearly the error path
 * that drops the partially-built message.
 */
74 static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
75 struct netlink_callback *cb,
76 const struct smc_diag_req *req,
79 struct smc_sock *smc = smc_sk(sk);
80 struct smc_diag_fallback fallback;
81 struct user_namespace *user_ns;
82 struct smc_diag_msg *r;
/* start a new multi-part netlink message for this socket */
85 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
86 cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
91 smc_diag_msg_common_fill(r, sk);
92 r->diag_state = sk->sk_state;
/* diag mode: TCP fallback, SMC-D (shared memory) or SMC-R (RDMA) */
93 if (smc->use_fallback)
94 r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP;
95 else if (smc->conn.lgr && smc->conn.lgr->is_smcd)
96 r->diag_mode = SMC_DIAG_MODE_SMCD;
98 r->diag_mode = SMC_DIAG_MODE_SMCR;
99 user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
100 if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
/* always report why (or whether) the socket fell back to TCP */
103 fallback.reason = smc->fallback_rsn;
104 fallback.peer_diagnosis = smc->peer_diagnosis;
105 if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0)
/* CONNINFO: buffer sizes, producer/consumer cursors and flag bytes of
 * an active connection (alert_token_local != 0 means connected) */
108 if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
109 smc->conn.alert_token_local) {
110 struct smc_connection *conn = &smc->conn;
111 struct smc_diag_conninfo cinfo = {
112 .token = conn->alert_token_local,
113 .sndbuf_size = conn->sndbuf_desc ?
114 conn->sndbuf_desc->len : 0,
115 .rmbe_size = conn->rmb_desc ? conn->rmb_desc->len : 0,
116 .peer_rmbe_size = conn->peer_rmbe_size,
118 .rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,
119 .rx_prod.count = conn->local_rx_ctrl.prod.count,
120 .rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,
121 .rx_cons.count = conn->local_rx_ctrl.cons.count,
123 .tx_prod.wrap = conn->local_tx_ctrl.prod.wrap,
124 .tx_prod.count = conn->local_tx_ctrl.prod.count,
125 .tx_cons.wrap = conn->local_tx_ctrl.cons.wrap,
126 .tx_cons.count = conn->local_tx_ctrl.cons.count,
129 *(u8 *)&conn->local_tx_ctrl.prod_flags,
130 .tx_conn_state_flags =
131 *(u8 *)&conn->local_tx_ctrl.conn_state_flags,
132 .rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags,
133 .rx_conn_state_flags =
134 *(u8 *)&conn->local_rx_ctrl.conn_state_flags,
136 .tx_prep.wrap = conn->tx_curs_prep.wrap,
137 .tx_prep.count = conn->tx_curs_prep.count,
138 .tx_sent.wrap = conn->tx_curs_sent.wrap,
139 .tx_sent.count = conn->tx_curs_sent.count,
140 .tx_fin.wrap = conn->tx_curs_fin.wrap,
141 .tx_fin.count = conn->tx_curs_fin.count,
144 if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
/* LGRINFO: link-group role and first-link identity — SMC-R only
 * (!is_smcd) and only while the link group is still on the global
 * list (i.e. not being torn down) */
148 if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
149 (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
150 !list_empty(&smc->conn.lgr->list)) {
151 struct smc_diag_lgrinfo linfo = {
152 .role = smc->conn.lgr->role,
153 .lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
154 .lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
157 memcpy(linfo.lnk[0].ibname,
158 smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
159 sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
/* GIDs are reported as human-readable colon-separated hex strings */
160 smc_gid_be16_convert(linfo.lnk[0].gid,
161 smc->conn.lgr->lnk[0].gid);
162 smc_gid_be16_convert(linfo.lnk[0].peer_gid,
163 smc->conn.lgr->lnk[0].peer_gid);
165 if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
/* DMBINFO: SMC-D link id, local/peer GIDs and DMB tokens */
168 if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
169 (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
170 !list_empty(&smc->conn.lgr->list)) {
171 struct smc_connection *conn = &smc->conn;
172 struct smcd_diag_dmbinfo dinfo;
174 memset(&dinfo, 0, sizeof(dinfo));
176 dinfo.linkid = *((u32 *)conn->lgr->id);
177 dinfo.peer_gid = conn->lgr->peer_gid;
178 dinfo.my_gid = conn->lgr->smcd->local_gid;
179 dinfo.token = conn->rmb_desc->token;
180 dinfo.peer_token = conn->peer_token;
182 if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
/* error path: drop the partially-built netlink message */
190 nlmsg_cancel(skb, nlh);
/*
 * smc_diag_dump_proto() - walk all sockets in @prot's SMC hash table
 * (under the table's read lock) and dump every socket belonging to the
 * requesting network namespace via __smc_diag_dump().
 * NOTE(review): the declarations of sk and rc, the early-exit for an
 * empty table, the loop's continue/break handling and the return are
 * missing from this extract — confirm against the full file.
 */
194 static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
195 struct netlink_callback *cb)
197 struct net *net = sock_net(skb->sk);
198 struct nlattr *bc = NULL;
199 struct hlist_head *head;
/* iterate the per-proto SMC socket hash under its read lock */
203 read_lock(&prot->h.smc_hash->lock);
204 head = &prot->h.smc_hash->ht;
205 if (hlist_empty(head))
208 sk_for_each(sk, head) {
/* only report sockets owned by the requesting netns */
209 if (!net_eq(sock_net(sk), net))
211 rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
217 read_unlock(&prot->h.smc_hash->lock);
/*
 * smc_diag_dump() - netlink dump callback: dump IPv4 SMC sockets
 * (smc_proto) first, then IPv6 ones (smc_proto6).
 * NOTE(review): the declaration of rc, any intermediate error check
 * between the two calls, and the return are missing from this extract.
 */
221 static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
225 rc = smc_diag_dump_proto(&smc_proto, skb, cb);
227 rc = smc_diag_dump_proto(&smc_proto6, skb, cb);
/*
 * smc_diag_handler_dump() - entry point for AF_SMC sock_diag requests.
 * For SOCK_DIAG_BY_FAMILY requests carrying NLM_F_DUMP, start a netlink
 * dump driven by smc_diag_dump().
 * NOTE(review): the fallthrough/return for non-dump requests is missing
 * from this extract.
 */
231 static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
233 struct net *net = sock_net(skb->sk);
235 if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
236 h->nlmsg_flags & NLM_F_DUMP) {
238 struct netlink_dump_control c = {
239 .dump = smc_diag_dump,
/* pre-size each dump skb so a 32KB allocation fits payload + overhead */
240 .min_dump_alloc = SKB_WITH_OVERHEAD(32768),
242 return netlink_dump_start(net->diag_nlsk, skb, h, &c);
/* sock_diag handler registered for the SMC address family.
 * NOTE(review): the .family initializer is not visible in this extract. */
248 static const struct sock_diag_handler smc_diag_handler = {
250 .dump = smc_diag_handler_dump,
/* module init: register the SMC diag handler with the sock_diag core */
253 static int __init smc_diag_init(void)
255 return sock_diag_register(&smc_diag_handler);
/* module exit: unregister the SMC diag handler */
258 static void __exit smc_diag_exit(void)
260 sock_diag_unregister(&smc_diag_handler);
263 module_init(smc_diag_init);
264 module_exit(smc_diag_exit);
265 MODULE_LICENSE("GPL");
/* 43 == AF_SMC: lets the kernel autoload this module when a sock_diag
 * request for the SMC family arrives over NETLINK_SOCK_DIAG */
266 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */);