// SPDX-License-Identifier: GPL-2.0-only
/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>

#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>
#define VERSION "1.2"

static char iucv_userid[80];
static struct proto iucv_proto = {
        .name		= "AF_IUCV",
        .owner		= THIS_MODULE,
        .obj_size	= sizeof(struct iucv_sock),
};
static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	sizeof_field(struct iucv_message, class)
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
        DEFINE_WAIT(__wait);						\
        long __timeo = timeo;						\
        ret = 0;							\
        prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
        while (!(condition)) {						\
                if (!__timeo) {						\
                        ret = -EAGAIN;					\
                        break;						\
                }							\
                if (signal_pending(current)) {				\
                        ret = sock_intr_errno(__timeo);			\
                        break;						\
                }							\
                release_sock(sk);					\
                __timeo = schedule_timeout(__timeo);			\
                lock_sock(sk);						\
                ret = sock_error(sk);					\
                if (ret)						\
                        break;						\
        }								\
        finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
        int __ret = 0;							\
        if (!(condition))						\
                __iucv_sock_wait(sk, condition, timeo, __ret);		\
        __ret;								\
})
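/*
 * Usage sketch (illustrative, not part of the original file): callers pass a
 * condition expression that is re-evaluated after every wakeup, e.g. the
 * send path below does
 *
 *	timeo = sock_sndtimeo(sk, noblock);
 *	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
 *
 * and receives 0, -EAGAIN (timeout), a signal errno, or a pending sock error.
 */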
static struct sock *iucv_accept_dequeue(struct sock *parent,
                                        struct socket *newsock);
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);
static struct iucv_sock_list iucv_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending	  = iucv_callback_connreq,
        .path_complete	  = iucv_callback_connack,
        .path_severed	  = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone,
        .path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
        memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
        memcpy(&dst[8], src, 8);
}
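/*
 * Note on the helpers above: IUCV path user data is a 16-byte, EBCDIC-encoded
 * field holding two 8-character names. high_nmcpy() fills bytes 0..7 and
 * low_nmcpy() fills bytes 8..15; e.g. afiucv_path_connect() builds
 * user_data = <dst name><src name> before converting it with ASCEBC().
 */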
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xff.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
        size_t datalen;

        if (msg->flags & IUCV_IPRMDATA) {
                datalen = 0xff - msg->rmmsg[7];
                return (datalen < 8) ? datalen : 8;
        }
        return msg->length;
}
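/*
 * Worked example of the PRMDATA length convention (illustrative): a 5-byte
 * payload is stored with PRMDATA[7] = 0xff - 5 = 0xfa, so iucv_msg_length()
 * computes 0xff - 0xfa = 5. The iprm_shutdown pattern above carries
 * PRMDATA[7] = 0x01, i.e. a "length" of 0xfe (> 7), so it is treated as a
 * special notification and the function returns 8.
 */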
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
        return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        if (sk->sk_state != IUCV_CONNECTED)
                return 1;
        if (iucv->transport == AF_IUCV_TRANS_IUCV)
                return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
        else
                return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
                        (atomic_read(&iucv->pendings) <= 0));
}
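/*
 * Flow-control summary (drawn from the checks above): the VM IUCV transport
 * throttles on the number of skbs queued on send_skb_q versus the path's
 * msglim, while the HiperSockets transport counts in-flight messages
 * (msg_sent) against the peer's advertised window (msglimit_peer) and
 * additionally requires that no TX-pending notifications are outstanding.
 */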
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}
/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                          struct sk_buff *skb, u8 flags)
{
        struct iucv_sock *iucv = iucv_sk(sock);
        struct af_iucv_trans_hdr *phs_hdr;
        struct sk_buff *nskb;
        int err, confirm_recv = 0;

        phs_hdr = skb_push(skb, sizeof(*phs_hdr));
        memset(phs_hdr, 0, sizeof(*phs_hdr));
        skb_reset_network_header(skb);

        phs_hdr->magic = ETH_P_AF_IUCV;
        phs_hdr->version = 1;
        phs_hdr->flags = flags;
        if (flags == AF_IUCV_FLAG_SYN)
                phs_hdr->window = iucv->msglimit;
        else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
                confirm_recv = atomic_read(&iucv->msg_recv);
                phs_hdr->window = confirm_recv;
                if (confirm_recv)
                        phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
        }
        memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
        memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
        memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
        memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
        ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
        ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
        ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
        ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
        if (imsg)
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

        skb->dev = iucv->hs_dev;
        if (!skb->dev) {
                err = -ENODEV;
                goto err_free;
        }

        dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);

        if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
                err = -ENETDOWN;
                goto err_free;
        }
        if (skb->len > skb->dev->mtu) {
                if (sock->sk_type == SOCK_SEQPACKET) {
                        err = -EMSGSIZE;
                        goto err_free;
                }
                skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);

        __skb_header_release(skb);
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb) {
                err = -ENOMEM;
                goto err_free;
        }

        skb_queue_tail(&iucv->send_skb_q, nskb);
        err = dev_queue_xmit(skb);
        if (net_xmit_eval(err)) {
                skb_unlink(nskb, &iucv->send_skb_q);
                kfree_skb(nskb);
        } else {
                atomic_sub(confirm_recv, &iucv->msg_recv);
                WARN_ON(atomic_read(&iucv->msg_recv) < 0);
        }
        return net_xmit_eval(err);

err_free:
        kfree_skb(skb);
        return err;
}
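/*
 * On-wire layout produced above (illustrative): an ethernet header carrying
 * protocol ETH_P_AF_IUCV, followed by struct af_iucv_trans_hdr (magic,
 * version, flags, window, the EBCDIC src/dst user IDs and application names,
 * plus the embedded iucv_message header), followed by the payload:
 *
 *	| eth hdr | af_iucv_trans_hdr | socket data ... |
 */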
static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;

        sk_for_each(sk, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_error_queue);

        sk_mem_reclaim(sk);

        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive iucv socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(refcount_read(&sk->sk_wmem_alloc));
        WARN_ON(sk->sk_wmem_queued);
        WARN_ON(sk->sk_forward_alloc);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
}
static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}
/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_path *path = iucv->path;

        if (iucv->path) {
                iucv->path = NULL;
                if (with_user_data) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        pr_iucv->path_sever(path, user_data);
                } else
                        pr_iucv->path_sever(path, NULL);
                iucv_path_free(path);
        }
}
/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        int err = 0;
        int blen;
        struct sk_buff *skb;
        u8 shutdown = 0;

        blen = sizeof(struct af_iucv_trans_hdr) +
               LL_RESERVED_SPACE(iucv->hs_dev);
        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                /* controlling flags should be sent anyway */
                shutdown = sk->sk_shutdown;
                sk->sk_shutdown &= RCV_SHUTDOWN;
        }
        skb = sock_alloc_send_skb(sk, blen, 1, &err);
        if (skb) {
                skb_reserve(skb, blen);
                err = afiucv_hs_send(NULL, sk, skb, flags);
        }
        if (shutdown)
                sk->sk_shutdown = shutdown;
        return err;
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned long timeo;
        int err = 0;

        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                        err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
                        sk->sk_state = IUCV_DISCONN;
                        sk->sk_state_change(sk);
                }
                fallthrough;

        case IUCV_DISCONN:
                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        iucv_sock_wait(sk,
                                        iucv_sock_in_state(sk, IUCV_CLOSED, 0),
                                        timeo);
                }
                fallthrough;

        case IUCV_CLOSING:
                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
                fallthrough;

        default:
                iucv_sever_path(sk, 1);
        }

        if (iucv->hs_dev) {
                dev_put(iucv->hs_dev);
                iucv->hs_dev = NULL;
                sk->sk_bound_dev_if = 0;
        }

        /* mark socket for deletion by iucv_sock_kill() */
        sock_set_flag(sk, SOCK_ZAPPED);

        release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent) {
                sk->sk_type = parent->sk_type;
                security_sk_clone(parent, sk);
        }
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
        struct sock *sk;
        struct iucv_sock *iucv;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
        if (!sk)
                return NULL;
        iucv = iucv_sk(sk);

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv->accept_q);
        spin_lock_init(&iucv->accept_q_lock);
        skb_queue_head_init(&iucv->send_skb_q);
        INIT_LIST_HEAD(&iucv->message_q.list);
        spin_lock_init(&iucv->message_q.lock);
        skb_queue_head_init(&iucv->backlog_skb_q);
        iucv->send_tag = 0;
        atomic_set(&iucv->pendings, 0);
        iucv->flags = 0;
        iucv->msglimit = 0;
        atomic_set(&iucv->msg_sent, 0);
        atomic_set(&iucv->msg_recv, 0);
        iucv->path = NULL;
        iucv->sk_txnotify = afiucv_hs_callback_txnotify;
        memset(&iucv->src_user_id, 0, 32);
        if (pr_iucv)
                iucv->transport = AF_IUCV_TRANS_IUCV;
        else
                iucv->transport = AF_IUCV_TRANS_HIPER;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state = IUCV_OPEN;

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}
static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        sk_acceptq_added(parent);
}

static void iucv_accept_unlink(struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        sk_acceptq_removed(iucv_sk(sk)->parent);
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}
static struct sock *iucv_accept_dequeue(struct sock *parent,
                                        struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        iucv_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_DISCONN ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}
static void __iucv_auto_name(struct iucv_sock *iucv)
{
        char name[12];

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }
        memcpy(iucv->src_name, name, 8);
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        char uid[sizeof(sa->siucv_user_id)];
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err = 0;
        struct net_device *dev;

        /* Verify the input sockaddr */
        if (addr_len < sizeof(struct sockaddr_iucv) ||
            addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path)
                goto done_unlock;

        /* Bind the socket */
        if (pr_iucv)
                if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
                        goto vm_bind; /* VM IUCV transport */

        /* try hiper transport */
        memcpy(uid, sa->siucv_user_id, sizeof(uid));
        ASCEBC(uid, 8);
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
                if (!memcmp(dev->perm_addr, uid, 8)) {
                        memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
                        /* Check for uninitialized siucv_name */
                        if (strncmp(sa->siucv_name, "        ", 8) == 0)
                                __iucv_auto_name(iucv);
                        else
                                memcpy(iucv->src_name, sa->siucv_name, 8);
                        sk->sk_bound_dev_if = dev->ifindex;
                        iucv->hs_dev = dev;
                        dev_hold(dev);
                        sk->sk_state = IUCV_BOUND;
                        iucv->transport = AF_IUCV_TRANS_HIPER;
                        if (!iucv->msglimit)
                                iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
                        rcu_read_unlock();
                        goto done_unlock;
                }
        }
        rcu_read_unlock();
vm_bind:
        if (pr_iucv) {
                /* use local userid for backward compat */
                memcpy(iucv->src_name, sa->siucv_name, 8);
                memcpy(iucv->src_user_id, iucv_userid, 8);
                sk->sk_state = IUCV_BOUND;
                iucv->transport = AF_IUCV_TRANS_IUCV;
                sk->sk_allocation |= GFP_DMA;
                if (!iucv->msglimit)
                        iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
                goto done_unlock;
        }
        /* found no dev to bind */
        err = -ENODEV;
done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        int err = 0;

        if (unlikely(!pr_iucv))
                return -EPROTO;

        memcpy(iucv->src_user_id, iucv_userid, 8);
        iucv->transport = AF_IUCV_TRANS_IUCV;
        sk->sk_allocation |= GFP_DMA;

        write_lock_bh(&iucv_sk_list.lock);
        __iucv_auto_name(iucv);
        write_unlock_bh(&iucv_sk_list.lock);

        if (!iucv->msglimit)
                iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

        return err;
}
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned char user_data[16];
        int err;

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv->src_name);
        ASCEBC(user_data, sizeof(user_data));

        /* Create path. */
        iucv->path = iucv_path_alloc(iucv->msglimit,
                                     IUCV_IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
                                    sa->siucv_user_id, NULL, user_data,
                                    sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                switch (err) {
                case 0x0b:	/* Target communicator is not logged on */
                        err = -ENETUNREACH;
                        break;
                case 0x0d:	/* Max connections for this guest exceeded */
                case 0x0e:	/* Max connections for target guest exceeded */
                        err = -EAGAIN;
                        break;
                case 0x0f:	/* Missing IUCV authorization */
                        err = -EACCES;
                        break;
                default:
                        err = -ECONNREFUSED;
                        break;
                }
        }
done:
        return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;

        if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_state == IUCV_OPEN &&
            iucv->transport == AF_IUCV_TRANS_HIPER)
                return -EBADFD; /* explicit bind required */

        if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
                return -EINVAL;

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv->dst_name, sa->siucv_name, 8);

        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
        else
                err = afiucv_path_connect(sock, addr);
        if (err)
                goto done;

        if (sk->sk_state != IUCV_CONNECTED)
                err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
                                                            IUCV_DISCONN),
                                     sock_sndtimeo(sk, flags & O_NONBLOCK));

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
                err = -ECONNREFUSED;

        if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
                iucv_sever_path(sk, 0);

done:
        release_sock(sk);
        return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND)
                goto done;

        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags, bool kern)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);

        addr->sa_family = AF_IUCV;

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
                memcpy(siucv->siucv_name, iucv->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return sizeof(struct sockaddr_iucv);
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
                          struct sk_buff *skb)
{
        u8 prmdata[8];

        memcpy(prmdata, (void *) skb->data, skb->len);
        prmdata[7] = 0xff - (u8) skb->len;
        return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
                                     (void *) prmdata, 8);
}
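/*
 * Note: iucv_send_iprm() is the encoding counterpart of iucv_msg_length():
 * it stores the payload length as prmdata[7] = 0xff - skb->len, so e.g. a
 * 7-byte send yields prmdata[7] = 0xf8.
 */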
static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                             size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        size_t headroom = 0;
        size_t linear;
        struct sk_buff *skb;
        struct iucv_message txmsg = {0};
        struct cmsghdr *cmsg;
        int cmsg_done;
        long timeo;
        char user_id[9];
        char appl_id[9];
        int err;
        int noblock = msg->msg_flags & MSG_DONTWAIT;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        /* SOCK_SEQPACKET: we do not support segmented records */
        if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        /* Return if the socket is not in connected state */
        if (sk->sk_state != IUCV_CONNECTED) {
                err = -ENOTCONN;
                goto out;
        }

        /* initialize defaults */
        cmsg_done = 0;	/* check for duplicate headers */

        /* iterate over control messages */
        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg)) {
                        err = -EINVAL;
                        goto out;
                }

                if (cmsg->cmsg_level != SOL_IUCV)
                        continue;

                if (cmsg->cmsg_type & cmsg_done) {
                        err = -EINVAL;
                        goto out;
                }
                cmsg_done |= cmsg->cmsg_type;

                switch (cmsg->cmsg_type) {
                case SCM_IUCV_TRGCLS:
                        if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
                                err = -EINVAL;
                                goto out;
                        }

                        /* set iucv message target class */
                        memcpy(&txmsg.class,
                               (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

                        break;

                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        /* allocate one skb for each iucv message:
         * this is fine for SOCK_SEQPACKET (unless we want to support
         * segmented records using the MSG_EOR flag), but
         * for SOCK_STREAM we might want to improve it in future */
        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                headroom = sizeof(struct af_iucv_trans_hdr) +
                           LL_RESERVED_SPACE(iucv->hs_dev);
                linear = len;
        } else {
                if (len < PAGE_SIZE) {
                        linear = len;
                } else {
                        /* In nonlinear "classic" iucv skb,
                         * reserve space for iucv_array
                         */
                        headroom = sizeof(struct iucv_array) *
                                   (MAX_SKB_FRAGS + 1);
                        linear = PAGE_SIZE - headroom;
                }
        }
        skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
                                   noblock, &err, 0);
        if (!skb)
                goto out;
        if (headroom)
                skb_reserve(skb, headroom);
        skb_put(skb, linear);
        skb->len = len;
        skb->data_len = len - linear;
        err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
        if (err)
                goto fail;

        /* wait if the outstanding-message limit for the iucv path has been
         * reached */
        timeo = sock_sndtimeo(sk, noblock);
        err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
        if (err)
                goto fail;

        /* return -ECONNRESET if the socket is no longer connected */
        if (sk->sk_state != IUCV_CONNECTED) {
                err = -ECONNRESET;
                goto fail;
        }

        /* increment and save iucv message tag for msg_completion cbk */
        txmsg.tag = iucv->send_tag++;
        IUCV_SKB_CB(skb)->tag = txmsg.tag;

        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                atomic_inc(&iucv->msg_sent);
                err = afiucv_hs_send(&txmsg, sk, skb, 0);
                if (err) {
                        atomic_dec(&iucv->msg_sent);
                        goto out;
                }
        } else { /* Classic VM IUCV transport */
                skb_queue_tail(&iucv->send_skb_q, skb);

                if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
                    skb->len <= 7) {
                        err = iucv_send_iprm(iucv->path, &txmsg, skb);

                        /* on success: there is no message_complete callback */
                        /* for an IPRMDATA msg; remove skb from send queue */
                        if (err == 0) {
                                skb_unlink(skb, &iucv->send_skb_q);
                                kfree_skb(skb);
                        }

                        /* this error should never happen since the	*/
                        /* IUCV_IPRMDATA path flag is set... sever path */
                        if (err == 0x15) {
                                pr_iucv->path_sever(iucv->path, NULL);
                                skb_unlink(skb, &iucv->send_skb_q);
                                err = -EPIPE;
                                goto fail;
                        }
                } else if (skb_is_nonlinear(skb)) {
                        struct iucv_array *iba = (struct iucv_array *)skb->head;
                        int i;

                        /* skip iucv_array lying in the headroom */
                        iba[0].address = (u32)(addr_t)skb->data;
                        iba[0].length = (u32)skb_headlen(skb);
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                                iba[i + 1].address =
                                        (u32)(addr_t)skb_frag_address(frag);
                                iba[i + 1].length = (u32)skb_frag_size(frag);
                        }
                        err = pr_iucv->message_send(iucv->path, &txmsg,
                                                    IUCV_IPBUFLST, 0,
                                                    (void *)iba, skb->len);
                } else { /* non-IPRM Linear skb */
                        err = pr_iucv->message_send(iucv->path, &txmsg,
                                        0, 0, (void *)skb->data, skb->len);
                }
                if (err) {
                        if (err == 3) {
                                user_id[8] = 0;
                                memcpy(user_id, iucv->dst_user_id, 8);
                                appl_id[8] = 0;
                                memcpy(appl_id, iucv->dst_name, 8);
                                pr_err("Application %s on z/VM guest %s exceeds message limit\n",
                                       appl_id, user_id);
                                err = -EAGAIN;
                        } else {
                                err = -EPIPE;
                        }
                        skb_unlink(skb, &iucv->send_skb_q);
                        goto fail;
                }
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
        size_t headroom, linear;
        struct sk_buff *skb;
        int err;

        if (len < PAGE_SIZE) {
                headroom = 0;
                linear = len;
        } else {
                headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
                linear = PAGE_SIZE - headroom;
        }
        skb = alloc_skb_with_frags(headroom + linear, len - linear,
                                   0, &err, GFP_ATOMIC | GFP_DMA);
        WARN_ONCE(!skb,
                  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
                  len, err);
        if (skb) {
                if (headroom)
                        skb_reserve(skb, headroom);
                skb_put(skb, linear);
                skb->len = len;
                skb->data_len = len - linear;
        }
        return skb;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                                 struct iucv_path *path,
                                 struct iucv_message *msg)
{
        int rc;
        unsigned int len;

        len = iucv_msg_length(msg);

        /* store msg target class in the second 4 bytes of skb ctrl buffer */
        /* Note: the first 4 bytes are reserved for msg tag */
        IUCV_SKB_CB(skb)->class = msg->class;

        /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
        if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
                if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
                        skb->data = NULL;
                        skb->len = 0;
                }
        } else {
                if (skb_is_nonlinear(skb)) {
                        struct iucv_array *iba = (struct iucv_array *)skb->head;
                        int i;

                        iba[0].address = (u32)(addr_t)skb->data;
                        iba[0].length = (u32)skb_headlen(skb);
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                                iba[i + 1].address =
                                        (u32)(addr_t)skb_frag_address(frag);
                                iba[i + 1].length = (u32)skb_frag_size(frag);
                        }
                        rc = pr_iucv->message_receive(path, msg,
                                              IUCV_IPBUFLST,
                                              (void *)iba, len, NULL);
                } else
                        rc = pr_iucv->message_receive(path, msg,
                                              msg->flags & IUCV_IPRMDATA,
                                              skb->data, len, NULL);
                if (rc) {
                        kfree_skb(skb);
                        return;
                }
                WARN_ON_ONCE(skb->len != len);
        }

        IUCV_SKB_CB(skb)->offset = 0;
        if (sk_filter(sk, skb)) {
                atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
                kfree_skb(skb);
                return;
        }
        if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *p, *n;

        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
                skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
                if (!skb)
                        break;
                iucv_process_message(sk, skb, p->path, &p->msg);
                list_del(&p->list);
                kfree(p);
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        break;
        }
}
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
                             size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned int copied, rlen;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;
        u32 offset;

        if ((sk->sk_state == IUCV_DISCONN) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
                return 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        /* receive/dequeue next skb:
         * the function understands MSG_PEEK and, thus, does not dequeue skb */
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        offset = IUCV_SKB_CB(skb)->offset;
        rlen   = skb->len - offset;		/* real length of skb */
        copied = min_t(unsigned int, rlen, len);
        if (!rlen)
                sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

        cskb = skb;
        if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return -EFAULT;
        }

        /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
        if (sk->sk_type == SOCK_SEQPACKET) {
                if (copied < rlen)
                        msg->msg_flags |= MSG_TRUNC;
                /* each iucv message contains a complete record */
                msg->msg_flags |= MSG_EOR;
        }

        /* create control message to store iucv msg target class:
         * get the trgcls from the control buffer of the skb due to
         * fragmentation of original iucv message. */
        err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
                       sizeof(IUCV_SKB_CB(skb)->class),
                       (void *)&IUCV_SKB_CB(skb)->class);
        if (err) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return err;
        }

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {

                /* SOCK_STREAM: re-queue skb if it contains unreceived data */
                if (sk->sk_type == SOCK_STREAM) {
                        if (copied < rlen) {
                                IUCV_SKB_CB(skb)->offset = offset + copied;
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                goto done;
                        }
                }

                kfree_skb(skb);
                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                        atomic_inc(&iucv->msg_recv);
                        if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
                                WARN_ON(1);
                                iucv_sock_close(sk);
                                return -EFAULT;
                        }
                }

                /* Queue backlog skbs */
                spin_lock_bh(&iucv->message_q.lock);
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
                        IUCV_SKB_CB(rskb)->offset = 0;
                        if (__sock_queue_rcv_skb(sk, rskb)) {
                                /* handle rcv queue full */
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                break;
                        }
                        rskb = skb_dequeue(&iucv->backlog_skb_q);
                }
                if (skb_queue_empty(&iucv->backlog_skb_q)) {
                        if (!list_empty(&iucv->message_q.list))
                                iucv_process_message_q(sk);
                        if (atomic_read(&iucv->msg_recv) >=
                                                        iucv->msglimit / 2) {
                                err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
                                if (err) {
                                        sk->sk_state = IUCV_DISCONN;
                                        sk->sk_state_change(sk);
                                }
                        }
                }
                spin_unlock_bh(&iucv->message_q.lock);
        }

done:
        /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
        if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
                copied = rlen;

        return copied;
}
static inline __poll_t iucv_accept_poll(struct sock *parent)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;

                if (sk->sk_state == IUCV_CONNECTED)
                        return EPOLLIN | EPOLLRDNORM;
        }

        return 0;
}
static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
                               poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask = 0;

        sock_poll_wait(file, sock, wait);

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= EPOLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= EPOLLIN | EPOLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= EPOLLHUP;

        if (sk->sk_state == IUCV_DISCONN)
                mask |= EPOLLIN;

        if (sock_writeable(sk) && iucv_below_msglim(sk))
                mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_message txmsg;
        int err = 0;

        how++;

        if ((how & ~SHUTDOWN_MASK) || !how)
                return -EINVAL;

        lock_sock(sk);
        switch (sk->sk_state) {
        case IUCV_LISTEN:
        case IUCV_DISCONN:
        case IUCV_CLOSING:
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;
        default:
                break;
        }

        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
                if (iucv->transport == AF_IUCV_TRANS_IUCV) {
                        txmsg.class = 0;
                        txmsg.tag = 0;
                        err = pr_iucv->message_send(iucv->path, &txmsg,
                                IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
                        if (err) {
                                switch (err) {
                                case 1:
                                        err = -ENOTCONN;
                                        break;
                                case 2:
                                        err = -ECONNRESET;
                                        break;
                                default:
                                        err = -ENOTCONN;
                                        break;
                                }
                        }
                } else
                        iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
        }

        sk->sk_shutdown |= how;
        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
                if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
                    iucv->path) {
                        err = pr_iucv->path_quiesce(iucv->path, NULL);
                        if (err)
                                err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
                }
                skb_queue_purge(&sk->sk_receive_queue);
        }

        /* Wake up anyone sleeping in poll */
        sk->sk_state_change(sk);

fail:
        release_sock(sk);
        return err;
}
static int iucv_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        int err = 0;

        if (!sk)
                return 0;

        iucv_sock_close(sk);

        sock_orphan(sk);
        iucv_sock_kill(sk);
        return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
                                sockptr_t optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int val;
        int rc;

        if (level != SOL_IUCV)
                return -ENOPROTOOPT;

        if (optlen < sizeof(int))
                return -EINVAL;

        if (copy_from_sockptr(&val, optval, sizeof(int)))
                return -EFAULT;

        rc = 0;

        lock_sock(sk);
        switch (optname) {
        case SO_IPRMDATA_MSG:
                if (val)
                        iucv->flags |= IUCV_IPRMDATA;
                else
                        iucv->flags &= ~IUCV_IPRMDATA;
                break;
        case SO_MSGLIMIT:
                switch (sk->sk_state) {
                case IUCV_OPEN:
                case IUCV_BOUND:
                        if (val < 1 || val > U16_MAX)
                                rc = -EINVAL;
                        else
                                iucv->msglimit = val;
                        break;
                default:
                        rc = -EINVAL;
                        break;
                }
                break;
        default:
                rc = -ENOPROTOOPT;
                break;
        }
        release_sock(sk);

        return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned int val;
        int len;

        if (level != SOL_IUCV)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;

        if (len < 0)
                return -EINVAL;

        len = min_t(unsigned int, len, sizeof(int));

        switch (optname) {
        case SO_IPRMDATA_MSG:
                val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
                break;
        case SO_MSGLIMIT:
                lock_sock(sk);
                val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
                                           : iucv->msglimit;	/* default */
                release_sock(sk);
                break;
        case SO_MSGSIZE:
                if (sk->sk_state == IUCV_OPEN)
                        return -EBADFD;
                val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
                                sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
                                0x7fffffff;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;

        return 0;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        unsigned char user_data[16];
        unsigned char nuser_data[16];
        unsigned char src_name[8];
        struct sock *sk, *nsk;
        struct iucv_sock *iucv, *niucv;
        int err;

        memcpy(src_name, ipuser, 8);
        EBCASC(src_name, 8);
        /* Find out if this path belongs to af_iucv. */
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
        sk = NULL;
        sk_for_each(sk, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
                        /*
                         * Found a listening socket with
                         * src_name == ipuser[0-7].
                         */
                        iucv = iucv_sk(sk);
                        break;
                }
        read_unlock(&iucv_sk_list.lock);
        if (!iucv)
                /* No socket found, not one of our paths. */
                return -EINVAL;

        bh_lock_sock(sk);

        /* Check if parent socket is listening */
        low_nmcpy(user_data, iucv->src_name);
        high_nmcpy(user_data, iucv->dst_name);
        ASCEBC(user_data, sizeof(user_data));
        if (sk->sk_state != IUCV_LISTEN) {
                err = pr_iucv->path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Check for backlog size */
        if (sk_acceptq_is_full(sk)) {
                err = pr_iucv->path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Create the new socket */
        nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
        if (!nsk) {
                err = pr_iucv->path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        niucv = iucv_sk(nsk);
        iucv_sock_init(nsk, sk);
        niucv->transport = AF_IUCV_TRANS_IUCV;
        nsk->sk_allocation |= GFP_DMA;

        /* Set the new iucv_sock */
        memcpy(niucv->dst_name, ipuser + 8, 8);
        EBCASC(niucv->dst_name, 8);
        memcpy(niucv->dst_user_id, ipvmid, 8);
        memcpy(niucv->src_name, iucv->src_name, 8);
        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
        niucv->path = path;

        /* Call iucv_accept */
        high_nmcpy(nuser_data, ipuser + 8);
        memcpy(nuser_data + 8, niucv->src_name, 8);
        ASCEBC(nuser_data + 8, 8);

        /* set message limit for path based on msglimit of accepting socket */
        niucv->msglimit = iucv->msglimit;
        path->msglim = iucv->msglimit;
        err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
        if (err) {
                iucv_sever_path(nsk, 1);
                iucv_sock_kill(nsk);
                goto fail;
        }

        iucv_accept_enqueue(sk, nsk);

        /* Wake up accept */
        nsk->sk_state = IUCV_CONNECTED;
        sk->sk_data_ready(sk);
        err = 0;
fail:
        bh_unlock_sock(sk);
        return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        sk->sk_state = IUCV_CONNECTED;
        sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *save_msg;
        int len;

        if (sk->sk_shutdown & RCV_SHUTDOWN) {
                pr_iucv->message_reject(path, msg);
                return;
        }

        spin_lock(&iucv->message_q.lock);

        if (!list_empty(&iucv->message_q.list) ||
            !skb_queue_empty(&iucv->backlog_skb_q))
                goto save_message;

        len = atomic_read(&sk->sk_rmem_alloc);
        len += SKB_TRUESIZE(iucv_msg_length(msg));
        if (len > sk->sk_rcvbuf)
                goto save_message;

        skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
        if (!skb)
                goto save_message;

        iucv_process_message(sk, skb, path, msg);
        goto out_unlock;

save_message:
        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
        if (!save_msg)
                goto out_unlock;
        save_msg->path = path;
        save_msg->msg = *msg;

        list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
        spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct sk_buff *this = NULL;
        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
        struct sk_buff *list_skb;
        unsigned long flags;

        bh_lock_sock(sk);

        spin_lock_irqsave(&list->lock, flags);
        skb_queue_walk(list, list_skb) {
                if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
                        this = list_skb;
                        break;
                }
        }
        if (this)
                __skb_unlink(this, list);
        spin_unlock_irqrestore(&list->lock, flags);

        if (this) {
                kfree_skb(this);
                /* wake up any process waiting for sending */
                iucv_sock_wake_msglim(sk);
        }

        if (sk->sk_state == IUCV_CLOSING) {
                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
                        sk->sk_state = IUCV_CLOSED;
                        sk->sk_state_change(sk);
                }
        }
        bh_unlock_sock(sk);
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        if (sk->sk_state == IUCV_CLOSED)
                return;

        bh_lock_sock(sk);
        iucv_sever_path(sk, 1);
        sk->sk_state = IUCV_DISCONN;

        sk->sk_state_change(sk);
        bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        bh_lock_sock(sk);
        if (sk->sk_state != IUCV_CLOSED) {
                sk->sk_shutdown |= SEND_SHUTDOWN;
                sk->sk_state_change(sk);
        }
        bh_unlock_sock(sk);
}
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
        struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
        char tmpID[8];
        char tmpName[8];

        ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
        ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
        ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
        ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
        memcpy(tmpID, trans_hdr->srcUserID, 8);
        memcpy(tmpName, trans_hdr->srcAppName, 8);
        memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
        memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
        memcpy(trans_hdr->destUserID, tmpID, 8);
        memcpy(trans_hdr->destAppName, tmpName, 8);
        skb_push(skb, ETH_HLEN);
        memset(skb->data, 0, ETH_HLEN);
}
/**
 * afiucv_hs_callback_syn - react on received SYN
 */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
        struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
        struct sock *nsk;
        struct iucv_sock *iucv, *niucv;
        int err;

        iucv = iucv_sk(sk);
        if (!iucv) {
                /* no sock - connection refused */
                afiucv_swap_src_dest(skb);
                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
                err = dev_queue_xmit(skb);
                goto out;
        }

        nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
        bh_lock_sock(sk);
        if ((sk->sk_state != IUCV_LISTEN) ||
            sk_acceptq_is_full(sk) ||
            !nsk) {
                /* error on server socket - connection refused */
                afiucv_swap_src_dest(skb);
                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
                err = dev_queue_xmit(skb);
                iucv_sock_kill(nsk);
                bh_unlock_sock(sk);
                goto out;
        }

        niucv = iucv_sk(nsk);
        iucv_sock_init(nsk, sk);
        niucv->transport = AF_IUCV_TRANS_HIPER;
        niucv->msglimit = iucv->msglimit;
        if (!trans_hdr->window)
                niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
        else
                niucv->msglimit_peer = trans_hdr->window;
        memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
        memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
        memcpy(niucv->src_name, iucv->src_name, 8);
        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
        nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
        niucv->hs_dev = iucv->hs_dev;
        dev_hold(niucv->hs_dev);
        afiucv_swap_src_dest(skb);
        trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
        trans_hdr->window = niucv->msglimit;
        /* if the receiver acks the xmit, the connection is established */
        err = dev_queue_xmit(skb);
        if (!err) {
                iucv_accept_enqueue(sk, nsk);
                nsk->sk_state = IUCV_CONNECTED;
                sk->sk_data_ready(sk);
        } else
                iucv_sock_kill(nsk);
        bh_unlock_sock(sk);

out:
        return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        if (!iucv)
                goto out;
        if (sk->sk_state != IUCV_BOUND)
                goto out;
        bh_lock_sock(sk);
        iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
        sk->sk_state = IUCV_CONNECTED;
        sk->sk_state_change(sk);
        bh_unlock_sock(sk);
out:
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        if (!iucv)
                goto out;
        if (sk->sk_state != IUCV_BOUND)
                goto out;
        bh_lock_sock(sk);
        sk->sk_state = IUCV_DISCONN;
        sk->sk_state_change(sk);
        bh_unlock_sock(sk);
out:
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_fin() - react on received FIN
 */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        /* other end of connection closed */
        if (!iucv)
                goto out;
        bh_lock_sock(sk);
        if (sk->sk_state == IUCV_CONNECTED) {
                sk->sk_state = IUCV_DISCONN;
                sk->sk_state_change(sk);
        }
        bh_unlock_sock(sk);
out:
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_win() - react on received WIN
 */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        if (!iucv)
                return NET_RX_SUCCESS;

        if (sk->sk_state != IUCV_CONNECTED)
                return NET_RX_SUCCESS;

        atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
        iucv_sock_wake_msglim(sk);
        return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_rx() - react on received data
 */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        if (!iucv) {
                kfree_skb(skb);
                return NET_RX_SUCCESS;
        }

        if (sk->sk_state != IUCV_CONNECTED) {
                kfree_skb(skb);
                return NET_RX_SUCCESS;
        }

        if (sk->sk_shutdown & RCV_SHUTDOWN) {
                kfree_skb(skb);
                return NET_RX_SUCCESS;
        }

        /* write stuff from iucv_msg to skb cb */
        skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        IUCV_SKB_CB(skb)->offset = 0;
        if (sk_filter(sk, skb)) {
                atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
                kfree_skb(skb);
                return NET_RX_SUCCESS;
        }

        spin_lock(&iucv->message_q.lock);
        if (skb_queue_empty(&iucv->backlog_skb_q)) {
                if (__sock_queue_rcv_skb(sk, skb))
                        /* handle rcv queue full */
                        skb_queue_tail(&iucv->backlog_skb_q, skb);
        } else
                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
        spin_unlock(&iucv->message_q.lock);
        return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 *		     called from netif RX softirq
 */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
                         struct packet_type *pt, struct net_device *orig_dev)
{
        struct sock *sk;
        struct iucv_sock *iucv;
        struct af_iucv_trans_hdr *trans_hdr;
        int err = NET_RX_SUCCESS;
        char nullstring[8];

        if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
                WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
                kfree_skb(skb);
                return NET_RX_SUCCESS;
        }

        trans_hdr = iucv_trans_hdr(skb);
        EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
        EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
        EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
        EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
        memset(nullstring, 0, sizeof(nullstring));
        iucv = NULL;
        sk = NULL;
        read_lock(&iucv_sk_list.lock);
        sk_for_each(sk, &iucv_sk_list.head) {
                if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
                        if ((!memcmp(&iucv_sk(sk)->src_name,
                                     trans_hdr->destAppName, 8)) &&
                            (!memcmp(&iucv_sk(sk)->src_user_id,
                                     trans_hdr->destUserID, 8)) &&
                            (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
                            (!memcmp(&iucv_sk(sk)->dst_user_id,
                                     nullstring, 8))) {
                                iucv = iucv_sk(sk);
                                break;
                        }
                } else {
                        if ((!memcmp(&iucv_sk(sk)->src_name,
                                     trans_hdr->destAppName, 8)) &&
                            (!memcmp(&iucv_sk(sk)->src_user_id,
                                     trans_hdr->destUserID, 8)) &&
                            (!memcmp(&iucv_sk(sk)->dst_name,
                                     trans_hdr->srcAppName, 8)) &&
                            (!memcmp(&iucv_sk(sk)->dst_user_id,
                                     trans_hdr->srcUserID, 8))) {
                                iucv = iucv_sk(sk);
                                break;
                        }
                }
        }
        read_unlock(&iucv_sk_list.lock);
        if (!iucv)
                sk = NULL;

        /* no sock
        how should we send with no sock
        1) send without sock no send rc checking?
        2) introduce default sock to handle this cases

         SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
         FIN -> just cleanup
         SYN|ACK, SYN|FIN, FIN -> no action? */

        switch (trans_hdr->flags) {
        case AF_IUCV_FLAG_SYN:
                /* connect request */
                err = afiucv_hs_callback_syn(sk, skb);
                break;
        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
                /* connect request confirmed */
                err = afiucv_hs_callback_synack(sk, skb);
                break;
        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
                /* connect request refused */
                err = afiucv_hs_callback_synfin(sk, skb);
                break;
        case (AF_IUCV_FLAG_FIN):
                /* close request */
                err = afiucv_hs_callback_fin(sk, skb);
                break;
        case (AF_IUCV_FLAG_WIN):
                err = afiucv_hs_callback_win(sk, skb);
                if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
                        kfree_skb(skb);
                        break;
                }
                fallthrough;	/* and receive non-zero length data */
        case (AF_IUCV_FLAG_SHT):
                /* shutdown request */
                fallthrough;	/* and receive zero length data */
        case 0:
                /* plain data frame */
                IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
                err = afiucv_hs_callback_rx(sk, skb);
                break;
        default:
                kfree_skb(skb);
        }

        return err;
}
/**
 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
 *				   transport
 */
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
                                        enum iucv_tx_notify n)
{
        struct sock *isk = skb->sk;
        struct sock *sk = NULL;
        struct iucv_sock *iucv = NULL;
        struct sk_buff_head *list;
        struct sk_buff *list_skb;
        struct sk_buff *nskb;
        unsigned long flags;

        read_lock_irqsave(&iucv_sk_list.lock, flags);
        sk_for_each(sk, &iucv_sk_list.head)
                if (sk == isk) {
                        iucv = iucv_sk(sk);
                        break;
                }
        read_unlock_irqrestore(&iucv_sk_list.lock, flags);

        if (!iucv || sock_flag(sk, SOCK_ZAPPED))
                return;

        list = &iucv->send_skb_q;
        spin_lock_irqsave(&list->lock, flags);
        skb_queue_walk_safe(list, list_skb, nskb) {
                if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
                        switch (n) {
                        case TX_NOTIFY_OK:
                                __skb_unlink(list_skb, list);
                                kfree_skb(list_skb);
                                iucv_sock_wake_msglim(sk);
                                break;
                        case TX_NOTIFY_PENDING:
                                atomic_inc(&iucv->pendings);
                                break;
                        case TX_NOTIFY_DELAYED_OK:
                                __skb_unlink(list_skb, list);
                                atomic_dec(&iucv->pendings);
                                if (atomic_read(&iucv->pendings) <= 0)
                                        iucv_sock_wake_msglim(sk);
                                kfree_skb(list_skb);
                                break;
                        case TX_NOTIFY_UNREACHABLE:
                        case TX_NOTIFY_DELAYED_UNREACHABLE:
                        case TX_NOTIFY_TPQFULL: /* not yet used */
                        case TX_NOTIFY_GENERALERROR:
                        case TX_NOTIFY_DELAYED_GENERALERROR:
                                __skb_unlink(list_skb, list);
                                kfree_skb(list_skb);
                                if (sk->sk_state == IUCV_CONNECTED) {
                                        sk->sk_state = IUCV_DISCONN;
                                        sk->sk_state_change(sk);
                                }
                                break;
                        }
                        break;
                }
        }
        spin_unlock_irqrestore(&list->lock, flags);

        if (sk->sk_state == IUCV_CLOSING) {
                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
                        sk->sk_state = IUCV_CLOSED;
                        sk->sk_state_change(sk);
                }
        }
}
/**
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
                               unsigned long event, void *ptr)
{
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
        struct sock *sk;
        struct iucv_sock *iucv;

        switch (event) {
        case NETDEV_REBOOT:
        case NETDEV_GOING_DOWN:
                sk_for_each(sk, &iucv_sk_list.head) {
                        iucv = iucv_sk(sk);
                        if ((iucv->hs_dev == event_dev) &&
                            (sk->sk_state == IUCV_CONNECTED)) {
                                if (event == NETDEV_GOING_DOWN)
                                        iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
                                sk->sk_state = IUCV_DISCONN;
                                sk->sk_state_change(sk);
                        }
                }
                break;
        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
        default:
                break;
        }
        return NOTIFY_DONE;
}
static struct notifier_block afiucv_netdev_notifier = {
        .notifier_call = afiucv_netdev_event,
};
static const struct proto_ops iucv_sock_ops = {
        .family		= PF_IUCV,
        .owner		= THIS_MODULE,
        .release	= iucv_sock_release,
        .bind		= iucv_sock_bind,
        .connect	= iucv_sock_connect,
        .listen		= iucv_sock_listen,
        .accept		= iucv_sock_accept,
        .getname	= iucv_sock_getname,
        .sendmsg	= iucv_sock_sendmsg,
        .recvmsg	= iucv_sock_recvmsg,
        .poll		= iucv_sock_poll,
        .ioctl		= sock_no_ioctl,
        .mmap		= sock_no_mmap,
        .socketpair	= sock_no_socketpair,
        .shutdown	= iucv_sock_shutdown,
        .setsockopt	= iucv_sock_setsockopt,
        .getsockopt	= iucv_sock_getsockopt,
};
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
                            int kern)
{
        struct sock *sk;

        if (protocol && protocol != PF_IUCV)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        switch (sock->type) {
        case SOCK_STREAM:
        case SOCK_SEQPACKET:
                /* currently, proto ops can handle both sk types */
                sock->ops = &iucv_sock_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}
static const struct net_proto_family iucv_sock_family_ops = {
        .family	= AF_IUCV,
        .owner	= THIS_MODULE,
        .create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
        .type = cpu_to_be16(ETH_P_AF_IUCV),
        .func = afiucv_hs_rcv,
};
static int afiucv_iucv_init(void)
{
        return pr_iucv->iucv_register(&af_iucv_handler, 0);
}

static void afiucv_iucv_exit(void)
{
        pr_iucv->iucv_unregister(&af_iucv_handler, 0);
}
static int __init afiucv_init(void)
{
        int err;

        if (MACHINE_IS_VM) {
                cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
                if (unlikely(err)) {
                        WARN_ON(err);
                        err = -EPROTONOSUPPORT;
                        goto out;
                }

                pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
                if (!pr_iucv) {
                        printk(KERN_WARNING "iucv_if lookup failed\n");
                        memset(&iucv_userid, 0, sizeof(iucv_userid));
                }
        } else {
                memset(&iucv_userid, 0, sizeof(iucv_userid));
                pr_iucv = NULL;
        }

        err = proto_register(&iucv_proto, 0);
        if (err)
                goto out;
        err = sock_register(&iucv_sock_family_ops);
        if (err)
                goto out_proto;

        if (pr_iucv) {
                err = afiucv_iucv_init();
                if (err)
                        goto out_sock;
        }

        err = register_netdevice_notifier(&afiucv_netdev_notifier);
        if (err)
                goto out_notifier;

        dev_add_pack(&iucv_packet_type);
        return 0;

out_notifier:
        if (pr_iucv)
                afiucv_iucv_exit();
out_sock:
        sock_unregister(PF_IUCV);
out_proto:
        proto_unregister(&iucv_proto);
out:
        if (pr_iucv)
                symbol_put(iucv_if);
        return err;
}
static void __exit afiucv_exit(void)
{
        if (pr_iucv) {
                afiucv_iucv_exit();
                symbol_put(iucv_if);
        }

        unregister_netdevice_notifier(&afiucv_netdev_notifier);
        dev_remove_pack(&iucv_packet_type);
        sock_unregister(PF_IUCV);
        proto_unregister(&iucv_proto);
}
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);