/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Callback functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * Socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xff.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further, if the socket
 * data len is greater than 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

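/*
 * Editorial note (not part of the original driver): a short sketch of the
 * IPRM length convention described above.  For a 5-byte payload, the sender
 * stores 0xff - 5 = 0xfa in PRMDATA[7]; the receiver then recovers
 * 0xff - 0xfa = 5.  The helper below (hypothetical name, illustration only)
 * shows the decode step in isolation.
 */
static inline size_t iprm_decode_len_example(const u8 prmdata[8])
{
	/* 0xff minus the stored length value yields the payload length */
	size_t datalen = 0xff - prmdata[7];

	/* lengths above 7 signal special messages; cap at 8 as above */
	return (datalen < 8) ? datalen : 8;
}
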
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
	skb->dev = iucv->hs_dev;
	if (!skb->dev) {
		err = -ENODEV;
		goto err_free;
	}

	dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);

	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
		err = -ENETDOWN;
		goto err_free;
	}
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET) {
			err = -EMSGSIZE;
			goto err_free;
		}
		skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);

	__skb_header_release(skb);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb) {
		err = -ENOMEM;
		goto err_free;
	}

	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);

err_free:
	kfree_skb(skb);
	return err;
}

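/*
 * Editorial note (not part of the original driver): every frame built by
 * afiucv_hs_send() carries an ethernet header plus the AF_IUCV transport
 * header in front of the payload, so callers must reserve headroom
 * accordingly (see iucv_send_ctrl() and iucv_sock_sendmsg() below).  A
 * helper with this (hypothetical) name captures that invariant:
 */
static inline unsigned int afiucv_hs_headroom_example(struct net_device *dev)
{
	/* link-layer reserve for dev_hard_header() + AF_IUCV header */
	return sizeof(struct af_iucv_trans_hdr) + LL_RESERVED_SPACE(dev);
}
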
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) +
	       LL_RESERVED_SPACE(iucv->hs_dev);
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:	/* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto,
				    gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[12];

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags, bool kern)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return sizeof(struct sockaddr_iucv);
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}

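/*
 * Editorial note (not part of the original driver): the encode step above is
 * the inverse of iucv_msg_length().  A (hypothetical) self-check illustrating
 * the round trip: a payload of skb->len == n (n <= 7) is stored with
 * prmdata[7] = 0xff - n, and the receiver recovers n = 0xff - prmdata[7].
 */
static inline bool iprm_roundtrip_ok_example(u8 len)
{
	u8 prmdata7 = 0xff - len;		/* encode, as above */
	size_t datalen = 0xff - prmdata7;	/* decode, as in iucv_msg_length() */

	return len <= 7 && datalen == len;
}
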
static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	size_t headroom = 0;
	size_t linear;
	struct sk_buff *skb;
	struct iucv_message txmsg = {0};
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */

	/* iterate over control messages */
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		headroom = sizeof(struct af_iucv_trans_hdr) +
			   LL_RESERVED_SPACE(iucv->hs_dev);
		linear = len;
	} else {
		if (len < PAGE_SIZE) {
			linear = len;
		} else {
			/* In nonlinear "classic" iucv skb,
			 * reserve space for iucv_array
			 */
			headroom = sizeof(struct iucv_array) *
				   (MAX_SKB_FRAGS + 1);
			linear = PAGE_SIZE - headroom;
		}
	}
	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
				   noblock, &err, 0);
	if (!skb)
		goto out;
	if (headroom)
		skb_reserve(skb, headroom);
	skb_put(skb, linear);
	skb->len = len;
	skb->data_len = len - linear;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto fail;

	/* wait if the number of outstanding messages for the iucv path
	 * has reached the message limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto out;
		}
	} else { /* Classic VM IUCV transport */
		skb_queue_tail(&iucv->send_skb_q, skb);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
		    skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback */
			/* for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
			}

			/* this error should never happen since the	*/
			/* IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				pr_iucv->path_sever(iucv->path, NULL);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			/* skip iucv_array lying in the headroom */
			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			err = pr_iucv->message_send(iucv->path, &txmsg,
						    IUCV_IPBUFLST, 0,
						    (void *)iba, skb->len);
		} else { /* non-IPRM Linear skb */
			err = pr_iucv->message_send(iucv->path, &txmsg,
					0, 0, (void *)skb->data, skb->len);
		}
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err("Application %s on z/VM guest %s exceeds message limit\n",
				       appl_id, user_id);
				err = -EAGAIN;
			} else {
				err = -EPIPE;
			}
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
	size_t headroom, linear;
	struct sk_buff *skb;
	int err;

	if (len < PAGE_SIZE) {
		headroom = 0;
		linear = len;
	} else {
		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;
	}
	skb = alloc_skb_with_frags(headroom + linear, len - linear,
				   0, &err, GFP_ATOMIC | GFP_DMA);
	WARN_ONCE(!skb,
		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
		  len, err);
	if (skb) {
		if (headroom)
			skb_reserve(skb, headroom);
		skb_put(skb, linear);
		skb->len = len;
		skb->data_len = len - linear;
	}
	return skb;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			rc = pr_iucv->message_receive(path, msg,
					      IUCV_IPBUFLST,
					      (void *)iba, len, NULL);
		} else {
			rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		}
		if (rc) {
			kfree_skb(skb);
			return;
		}
		WARN_ON_ONCE(skb->len != len);
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return;
	}
	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen   = skb->len - offset;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (__sock_queue_rcv_skb(sk, rskb)) {
				/* handle rcv queue full */
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

static inline __poll_t iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}

__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
			poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	sock_poll_wait(file, sock, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= EPOLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= EPOLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
	    sk->sk_state == IUCV_CONNECTED) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
		    iucv->path) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

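/*
 * Editorial note (not part of the original driver): a minimal userspace
 * sketch of the SOL_IUCV options and the SCM_IUCV_TRGCLS ancillary data
 * handled above and in iucv_sock_sendmsg().  It assumes the application
 * provides the AF_IUCV definitions (struct sockaddr_iucv, SOL_IUCV,
 * SO_MSGLIMIT, SCM_IUCV_TRGCLS), which are not exported via a standard
 * uapi header.
 *
 *	int fd = socket(AF_IUCV, SOCK_SEQPACKET, 0);
 *
 *	// raise the message limit before bind/connect (IUCV_OPEN state)
 *	int limit = 128;
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 *
 *	// send one record with an IUCV target class in ancillary data;
 *	// SOCK_SEQPACKET requires MSG_EOR (see iucv_sock_sendmsg())
 *	char buf[7] = "hello";
 *	struct iovec iov = { .iov_base = buf, .iov_len = 5 };
 *	char control[CMSG_SPACE(sizeof(uint32_t))];
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = control, .msg_controllen = sizeof(control),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type  = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(uint32_t));
 *	uint32_t trgcls = 1;
 *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
 *	sendmsg(fd, &mh, MSG_EOR);
 */
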
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr =
				(struct af_iucv_trans_hdr *)skb->data;
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}

/**
 * afiucv_hs_callback_syn - react on received SYN
 **/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err;

	iucv = iucv_sk(sk);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		iucv_sock_kill(nsk);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if the receiver acks the transmission, the connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 **/
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	iucv->msglimit_peer = trans_hdr->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 **/
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_fin() - react on received FIN
 **/
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv)
		goto out;
	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_win() - react on received WIN
 **/
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(trans_hdr->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_rx() - react on received data
 **/
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* copy fields from the transport header into the skb ctrl buffer */
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (__sock_queue_rcv_skb(sk, skb))
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 *		     called from netif RX softirq
 **/
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err = NET_RX_SUCCESS;
	char nullstring[8];

	if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
		WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
			  (int)skb->len,
			  (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}
	if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
		if (skb_linearize(skb)) {
			WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d",
				  (int)skb->len);
			kfree_skb(skb);
			return NET_RX_SUCCESS;
		}
	skb_pull(skb, ETH_HLEN);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));

	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock
	   how should we send with no sock
	   1) send without sock no send rc checking?
	   2) introduce default sock to handle these cases
	    SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	    data -> send FIN
	    SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			kfree_skb(skb);
			break;
		}
		/* fall through and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		/* fall through and receive zero length data */
	default:
		/* plain data frame */
		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	}

	return err;
}

/**
 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
 *				   transport
 **/
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *nskb;
	unsigned long flags;

	read_lock_irqsave(&iucv_sk_list.lock, flags);
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk == isk) {
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock_irqrestore(&iucv_sk_list.lock, flags);

	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
		return;

	list = &iucv->send_skb_q;
	spin_lock_irqsave(&list->lock, flags);
	if (skb_queue_empty(list))
		goto out_unlock;
	list_skb = list->next;
	nskb = list_skb->next;
	while (list_skb != (struct sk_buff *)list) {
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				iucv_sock_wake_msglim(sk);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(list_skb, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				kfree_skb(list_skb);
				break;
			case TX_NOTIFY_UNREACHABLE:
			case TX_NOTIFY_DELAYED_UNREACHABLE:
			case TX_NOTIFY_TPQFULL: /* not yet used */
			case TX_NOTIFY_GENERALERROR:
			case TX_NOTIFY_DELAYED_GENERALERROR:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				if (sk->sk_state == IUCV_CONNECTED) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
				break;
			}
			break;
		}
		list_skb = nskb;
		nskb = nskb->next;
	}
out_unlock:
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= PF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};

static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_iucv_dev;
	return 0;

out_iucv_dev:
	put_device(af_iucv_dev);
out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void afiucv_iucv_exit(void)
{
	device_unregister(af_iucv_dev);
	driver_unregister(&af_iucv_driver);
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
}

static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	}

	err = register_netdevice_notifier(&afiucv_netdev_notifier);
	if (err)
		goto out_notifier;

	dev_add_pack(&iucv_packet_type);
	return 0;

out_notifier:
	if (pr_iucv)
		afiucv_iucv_exit();
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}

static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		afiucv_iucv_exit();
		symbol_put(iucv_if);
	}

	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);