/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"
static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
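/*
 * Note on iprm_shutdown: with the IPRM length convention documented at
 * iucv_msg_length() below, PRMDATA[7] = 0x01 decodes to a socket data
 * length of 0xff - 0x01 = 254.  Anything greater than 7 cannot be ordinary
 * socket data, which is what marks this eight-byte pattern as a special
 * notification rather than payload.
 */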
#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))
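/*
 * The wait macros below follow the usual sk_wait_event() pattern: the sock
 * lock is dropped around schedule_timeout() and re-taken afterwards, and
 * the wait ends early with -EAGAIN on timeout, sock_intr_errno() on a
 * pending signal, or a pending socket error.
 */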
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	__iucv_sock_wait(sk, condition, timeo, __ret);			\
	__ret;								\
})
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Callback functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
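/*
 * IUCV paths carry 16 bytes of user data.  AF_IUCV splits it into two
 * 8-byte application names: high_nmcpy() fills bytes 0..7 and low_nmcpy()
 * fills bytes 8..15 (see iucv_sever_path() and the connect path for how
 * the peer and local names are placed).
 */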
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}
/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
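/*
 * Worked example of the encoding above: 3 bytes of socket data are stored
 * in PRMDATA[0..2] with PRMDATA[7] = 0xff - 3 = 0xfc, so iucv_msg_length()
 * returns 3 again.  For iprm_shutdown, 0xff - 0x01 yields 254, and the
 * function caps the result at 8.
 */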
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}
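/*
 * Flow control differs per transport: the VM IUCV transport bounds the
 * number of skbs queued on send_skb_q by the negotiated path msglim, while
 * the HiperSockets transport tracks in-flight messages (msg_sent) against
 * the window advertised by the peer (msglimit_peer) and additionally
 * requires that no TX-pending notifications are outstanding.
 */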
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
					sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = ETH_P_AF_IUCV;
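	/* Keep a clone on the send queue: afiucv_hs_callback_txnotify()
	 * later matches it against the device's TX notification and either
	 * frees it or, on a delivery error, tears the connection down.
	 */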
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);
}
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}
/* Send controlling flags (e.g. FIN) through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	return err;
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:	/* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto,
				    gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;
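	/* Note on GFP_DMA above: data buffers handed to the IUCV channel
	 * must be 31-bit addressable on s390 (below 2 GB), which is what
	 * the GFP_DMA zone provides on this architecture.
	 */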
	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[12];

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}
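/*
 * Autobound names generated by __iucv_auto_name() above are eight hex
 * digits drawn from the global autobind_name counter; the loop simply
 * retries with the next counter value until an unused name is found.
 */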
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}
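/*
 * Connect the VM IUCV transport: allocate an IUCV path whose 16-byte user
 * data carries the peer's name in the high half and the local name in the
 * low half (both converted to EBCDIC), then translate the IUCV completion
 * codes of path_connect() into errno values.
 */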
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list, and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *) prmdata, 8);
}
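/*
 * Example: a 5-byte payload sent via iucv_send_iprm() occupies
 * prmdata[0..4], and prmdata[7] = 0xff - 5 = 0xfa encodes the length; the
 * receiver decodes 5 from prmdata[7] and ignores the remaining bytes.
 */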
static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb = sock_alloc_send_skb(sk,
			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
			noblock, &err);
	else
		skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if outstanding messages for the iucv path have reached the
	 * message limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto fail;
		}
	} else { /* Classic VM IUCV transport */
		skb_queue_tail(&iucv->send_skb_q, skb);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
		    skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback
			 * for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
			}

			/* this error should never happen since the
			 * IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				pr_iucv->path_sever(iucv->path, NULL);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else {
			err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
						    (void *) skb->data,
						    skb->len);
			if (err) {
				if (err == 3) {
					user_id[8] = 0;
					memcpy(user_id, iucv->dst_user_id, 8);
					appl_id[8] = 0;
					memcpy(appl_id, iucv->dst_name, 8);
					pr_err("Application %s on z/VM guest %s"
					       " exceeds message limit\n",
					       appl_id, user_id);
					err = -EAGAIN;
				} else {
					err = -EPIPE;
				}
				skb_unlink(skb, &iucv->send_skb_q);
				goto fail;
			}
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skbs
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_put(nskb, size);
		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				pr_iucv->path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen   = skb->len - offset;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}
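	/* Receive-window bookkeeping for HiperSockets (block above):
	 * msg_recv counts messages consumed since the last confirmation;
	 * once the backlog is drained and at least half the message limit
	 * has been consumed, an AF_IUCV_FLAG_WIN frame returns the credits
	 * to the sender, and a failure to send it disconnects the socket.
	 */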
done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= POLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
	    sk->sk_state == IUCV_CONNECTED) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
		    iucv->path) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr =
				(struct af_iucv_trans_hdr *)skb->data;
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}
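/*
 * Control frames such as SYN|FIN (connection refused) and SYN|ACK are sent
 * by reflecting the received skb: afiucv_swap_src_dest() converts the names
 * back to EBCDIC, exchanges source and destination, and rebuilds an
 * all-zero ethernet header before the frame goes back to dev_queue_xmit().
 */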
/**
 * afiucv_hs_callback_syn - react on received SYN
 */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err;

	iucv = iucv_sk(sk);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		iucv_sock_kill(nsk);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	iucv->msglimit_peer = trans_hdr->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_fin() - react on received FIN
 */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv)
		goto out;
	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_win() - react on received WIN
 */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(trans_hdr->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_rx() - react on received data
 */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	IUCV_SKB_CB(skb)->offset = 0;
	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (sock_queue_rcv_skb(sk, skb)) {
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
		}
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 *		     called from netif RX softirq
 */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	char nullstring[8];
	int err = 0;

	skb_pull(skb, ETH_HLEN);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;
	/* no sock
	how should we send with no sock
	1) send without sock no send rc checking?
	2) introduce default sock to handle these cases

	 SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	 data -> send FIN
	 SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			kfree_skb(skb);
			break;
		}
		/* fall through and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		/* fall through and receive zero length data */
	case 0:
		/* plain data frame */
		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	default:
		kfree_skb(skb);
	}

	return err;
}
/**
 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
 *				   transport
 **/
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *nskb;
	unsigned long flags;

	read_lock_irqsave(&iucv_sk_list.lock, flags);
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk == isk) {
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock_irqrestore(&iucv_sk_list.lock, flags);

	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
		return;

	list = &iucv->send_skb_q;
	spin_lock_irqsave(&list->lock, flags);
	if (skb_queue_empty(list))
		goto out_unlock;
	list_skb = list->next;
	nskb = list_skb->next;
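	/* Queue entries are matched by comparing the shared skb_shinfo()
	 * area, which identifies clones of the same skb: the skb handed
	 * back by the transport is not necessarily the same struct sk_buff
	 * that was queued in afiucv_hs_send().
	 */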
	while (list_skb != (struct sk_buff *)list) {
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				iucv_sock_wake_msglim(sk);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(list_skb, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				kfree_skb(list_skb);
				break;
			case TX_NOTIFY_UNREACHABLE:
			case TX_NOTIFY_DELAYED_UNREACHABLE:
			case TX_NOTIFY_TPQFULL: /* not yet used */
			case TX_NOTIFY_GENERALERROR:
			case TX_NOTIFY_DELAYED_GENERALERROR:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				if (sk->sk_state == IUCV_CONNECTED) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
				break;
			}
			break;
		}
		list_skb = nskb;
		nskb = nskb->next;
	}
out_unlock:
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};
static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};
static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};
static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_iucv_dev;
	return 0;

out_iucv_dev:
	put_device(af_iucv_dev);
out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static void afiucv_iucv_exit(void)
{
	device_unregister(af_iucv_dev);
	driver_unregister(&af_iucv_driver);
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
}
static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	}

	err = register_netdevice_notifier(&afiucv_netdev_notifier);
	if (err)
		goto out_notifier;

	dev_add_pack(&iucv_packet_type);
	return 0;

out_notifier:
	if (pr_iucv)
		afiucv_iucv_exit();
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}
static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		afiucv_iucv_exit();
		symbol_put(iucv_if);
	}

	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);