// SPDX-License-Identifier: GPL-2.0
/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *						     udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff    :       Unconnected accept() fix.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/indirect_call_wrapper.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include <crypto/hash.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
				  void *key)
{
	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
				int *err, long *timeo_p,
				const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (READ_ONCE(queue->prev) != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);

static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}

struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  int *off, int *err,
					  struct sk_buff **last)
{
	bool peek_at_off = false;
	struct sk_buff *skb;
	int _off = 0;

	if (unlikely(flags & MSG_PEEK && *off >= 0)) {
		peek_at_off = true;
		_off = *off;
	}

	*last = queue->prev;
	skb_queue_walk(queue, skb) {
		if (flags & MSG_PEEK) {
			if (peek_at_off && _off >= skb->len &&
			    (_off || skb->peeked)) {
				_off -= skb->len;
				continue;
			}
			if (!skb->len) {
				skb = skb_set_peeked(skb);
				if (IS_ERR(skb)) {
					*err = PTR_ERR(skb);
					return NULL;
				}
			}
			refcount_inc(&skb->users);
		} else {
			__skb_unlink(skb, queue);
		}
		*off = _off;
		return skb;
	}

	return NULL;
}

/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@queue: socket queue from which to receive
 *	@flags: MSG\_ flags
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so
 *	the caller needs to unlock the socket in that case (usually by
 *	calling skb_free_datagram). Returns NULL with @err set to
 *	-EAGAIN if no data was available or to some other value if an
 *	error was detected.
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
					struct sk_buff_head *queue,
					unsigned int flags, int *off, int *err,
					struct sk_buff **last)
{
	struct sk_buff *skb;
	unsigned long cpu_flags;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb = __skb_try_recv_from_queue(sk, queue, flags, off, &error,
						last);
		spin_unlock_irqrestore(&queue->lock, cpu_flags);
		if (error)
			goto no_packet;
		if (skb)
			return skb;

		if (!sk_can_busy_loop(sk))
			break;

		sk_busy_loop(sk, flags & MSG_DONTWAIT);
	} while (READ_ONCE(queue->prev) != *last);

	error = -EAGAIN;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);

struct sk_buff *__skb_recv_datagram(struct sock *sk,
				    struct sk_buff_head *sk_queue,
				    unsigned int flags, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, sk_queue, flags, off, err,
					      &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, sk_queue, err,
					      &timeo, last));

	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int *err)
{
	int off = 0;

	return __skb_recv_datagram(sk, &sk->sk_receive_queue, flags,
				   &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
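
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * minimal recvmsg() for a datagram protocol built on the helpers
 * above. "example_recvmsg" is a hypothetical name and error handling
 * is abbreviated; real callers also fill in msg_name, cmsgs, etc.
 *
 *	static int example_recvmsg(struct socket *sock, struct msghdr *msg,
 *				   size_t len, int flags)
 *	{
 *		struct sock *sk = sock->sk;
 *		struct sk_buff *skb;
 *		size_t copied;
 *		int err;
 *
 *		skb = skb_recv_datagram(sk, flags, &err);
 *		if (!skb)
 *			return err;
 *
 *		copied = skb->len;
 *		if (copied > len) {
 *			copied = len;
 *			msg->msg_flags |= MSG_TRUNC;
 *		}
 *		err = skb_copy_datagram_msg(skb, 0, msg, copied);
 *		skb_free_datagram(sk, skb);
 *		return err ? err : copied;
 *	}
 */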

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram);

void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
	bool slow;

	if (!skb_unref(skb)) {
		sk_peek_offset_bwd(sk, len);
		return;
	}

	slow = lock_sock_fast(sk);
	sk_peek_offset_bwd(sk, len);
	skb_orphan(skb);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(__skb_free_datagram_locked);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk_queue->lock);
		if (skb->next) {
			__skb_unlink(skb, sk_queue);
			refcount_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk_queue->lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG\_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
				      NULL);

	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
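
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * protocol that discovers a packet is bad only after receiving it can
 * drop the skb with the same flags it received it with, much as UDP
 * does on a checksum failure. "packet_is_bad" is a hypothetical check.
 *
 *	skb = skb_recv_datagram(sk, flags, &err);
 *	if (!skb)
 *		return err;
 *	if (packet_is_bad(skb)) {
 *		skb_kill_datagram(sk, skb, flags);
 *		return -EAGAIN;
 *	}
 */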

INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr,
							     size_t bytes,
							     void *data __always_unused,
							     struct iov_iter *i));

static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
			       struct iov_iter *to, int len, bool fault_short,
			       size_t (*cb)(const void *, size_t, void *,
					    struct iov_iter *), void *data)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset, n;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
				    skb->data + offset, copy, data, to);
		offset += n;
		if (n != copy)
			goto short_copy;
		if ((len -= copy) == 0)
			return 0;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			struct page *page = skb_frag_page(frag);
			u8 *vaddr = kmap(page);

			if (copy > len)
				copy = len;
			n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
					vaddr + skb_frag_off(frag) + offset - start,
					copy, data, to);
			kunmap(page);
			offset += n;
			if (n != copy)
				goto short_copy;
			if (!(len -= copy))
				return 0;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (__skb_datagram_iter(frag_iter, offset - start,
						to, copy, fault_short, cb, data))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

	/* This is not really a user copy fault, but rather someone
	 * gave us a bogus length on the skb. We should probably
	 * print a warning here as it may indicate a kernel bug.
	 */

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;

short_copy:
	if (fault_short || iov_iter_count(to))
		goto fault;

	return 0;
}

/**
 *	skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
 *          and update a hash.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@hash: hash request to update
 */
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len,
				    struct ahash_request *hash)
{
	return __skb_datagram_iter(skb, offset, to, len, true,
				   hash_and_copy_to_iter, hash);
}
EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter);

static size_t simple_copy_to_iter(const void *addr, size_t bytes,
		void *data __always_unused, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len)
{
	trace_skb_copy_datagram_iovec(skb, len);
	return __skb_datagram_iter(skb, offset, to, len, false,
				   simple_copy_to_iter, NULL);
}
EXPORT_SYMBOL(skb_copy_datagram_iter);
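
/*
 * Example usage (an illustrative sketch, not part of this file):
 * copying part of an skb into a plain kernel buffer by wrapping the
 * buffer in a kvec-backed iov_iter; "buf" and "buflen" are
 * assumptions. (On kernels with ITER_DEST/ITER_SOURCE, the READ
 * direction below is spelled ITER_DEST.)
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = buflen };
 *	struct iov_iter to;
 *
 *	iov_iter_kvec(&to, READ, &kv, 1, buflen);
 *	err = skb_copy_datagram_iter(skb, offset, &to, buflen);
 */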

/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from,
				int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_from_iter(skb->data + offset, copy, from) != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			size_t copied;

			if (copy > len)
				copy = len;
			copied = copy_page_from_iter(skb_frag_page(frag),
					  skb_frag_off(frag) + offset - start,
					  copy, from);
			if (copied != copy)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iter(frag_iter,
							offset - start,
							from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
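
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * sendmsg() path that linearizes user data into a freshly allocated
 * skb; "hdr_len" and the surrounding allocation logic are assumptions.
 *
 *	skb = sock_alloc_send_skb(sk, hdr_len + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *	skb_reserve(skb, hdr_len);
 *	skb_put(skb, len);
 *	if (skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len)) {
 *		kfree_skb(skb);
 *		return -EFAULT;
 *	}
 */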

int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
			    struct sk_buff *skb, struct iov_iter *from,
			    size_t length)
{
	int frag;

	if (msg && msg->msg_ubuf && msg->sg_from_iter)
		return msg->sg_from_iter(sk, skb, from, length);

	frag = skb_shinfo(skb)->nr_frags;

	while (length && iov_iter_count(from)) {
		struct page *pages[MAX_SKB_FRAGS];
		struct page *last_head = NULL;
		size_t start;
		ssize_t copied;
		unsigned long truesize;
		int refs, n = 0;

		if (frag == MAX_SKB_FRAGS)
			return -EMSGSIZE;

		copied = iov_iter_get_pages(from, pages, length,
					    MAX_SKB_FRAGS - frag, &start);
		if (copied < 0)
			return -EFAULT;

		iov_iter_advance(from, copied);
		length -= copied;

		truesize = PAGE_ALIGN(copied + start);
		skb->data_len += copied;
		skb->len += copied;
		skb->truesize += truesize;
		if (sk && sk->sk_type == SOCK_STREAM) {
			sk_wmem_queued_add(sk, truesize);
			if (!skb_zcopy_pure(skb))
				sk_mem_charge(sk, truesize);
		} else {
			refcount_add(truesize, &skb->sk->sk_wmem_alloc);
		}
		for (refs = 0; copied != 0; start = 0) {
			int size = min_t(int, copied, PAGE_SIZE - start);
			struct page *head = compound_head(pages[n]);

			start += (pages[n] - head) << PAGE_SHIFT;
			copied -= size;
			n++;
			if (frag) {
				skb_frag_t *last = &skb_shinfo(skb)->frags[frag - 1];

				if (head == skb_frag_page(last) &&
				    start == skb_frag_off(last) + skb_frag_size(last)) {
					skb_frag_size_add(last, size);
					/* We combined this page, we need to release
					 * a reference. Since compound pages refcount
					 * is shared among many pages, batch the refcount
					 * adjustments to limit false sharing.
					 */
					last_head = head;
					refs++;
					continue;
				}
			}
			if (refs) {
				page_ref_sub(last_head, refs);
				refs = 0;
			}
			skb_fill_page_desc(skb, frag++, head, start, size);
		}
		if (refs)
			page_ref_sub(last_head, refs);
	}
	return 0;
}
EXPORT_SYMBOL(__zerocopy_sg_from_iter);

/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
	int copy = min_t(int, skb_headlen(skb), iov_iter_count(from));

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
		return -EFAULT;

	return __zerocopy_sg_from_iter(NULL, NULL, skb, from, ~0U);
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
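
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * MSG_ZEROCOPY-style send path lets this helper pin the payload pages
 * after the headers have been copied. The ubuf_info setup and
 * completion notification that real callers need are elided; "err" and
 * "free_skb" are placeholders.
 *
 *	err = zerocopy_sg_from_iter(skb, &msg->msg_iter);
 *	if (err)
 *		goto free_skb;
 */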

/**
 *	skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
 *          and update a checksum.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@csump: checksum pointer
 */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      struct iov_iter *to, int len,
				      __wsum *csump)
{
	struct csum_state csdata = { .csum = *csump };
	int ret;

	ret = __skb_datagram_iter(skb, offset, to, len, true,
				  csum_and_copy_to_iter, &csdata);
	if (ret)
		return ret;

	*csump = csdata.csum;
	return 0;
}

/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@msg: destination
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
				   int hlen, struct msghdr *msg)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	if (msg_data_left(msg) < chunk) {
		if (__skb_checksum_complete(skb))
			return -EINVAL;
		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
					       chunk, &csum))
			goto fault;

		if (csum_fold(csum)) {
			iov_iter_revert(&msg->msg_iter, chunk);
			return -EINVAL;
		}

		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(NULL, skb);
	}
	return 0;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
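
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * UDP-style receive path copies without checksumming when the checksum
 * is already known good, and otherwise copies and verifies in one
 * pass; "off", "copied" and "csum_copy_err" follow udp_recvmsg()
 * loosely.
 *
 *	if (skb_csum_unnecessary(skb)) {
 *		err = skb_copy_datagram_msg(skb, off, msg, copied);
 *	} else {
 *		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 *		if (err == -EINVAL)
 *			goto csum_copy_err;
 *	}
 */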

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you *don't* use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
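
/*
 * Example usage (an illustrative sketch, not part of this file):
 * several datagram protocols wire this helper straight into their
 * proto_ops, while UDP wraps it in udp_poll(). "PF_EXAMPLE" and
 * "example_recvmsg" are hypothetical placeholders.
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family		= PF_EXAMPLE,
 *		.poll		= datagram_poll,
 *		.recvmsg	= example_recvmsg,
 *		...
 *	};
 */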