static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
+ enum skb_drop_reason drop_reason;
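+ /* Set on every error path below before jumping to the drop label. */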
int txq = skb->queue_mapping;
struct netdev_queue *queue;
struct tun_file *tfile;
tfile = rcu_dereference(tun->tfiles[txq]);
/* Drop packet if interface is not attached */
- if (!tfile)
+ if (!tfile) {
+ drop_reason = SKB_DROP_REASON_DEV_READY;
goto drop;
+ }
if (!rcu_dereference(tun->steering_prog))
tun_automq_xmit(tun, skb);
/* Drop if the filter does not like it.
* This is a noop if the filter is disabled.
* Filter can be enabled only for the TAP devices. */
- if (!check_filter(&tun->txflt, skb))
+ if (!check_filter(&tun->txflt, skb)) {
+ drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
goto drop;
+ }
if (tfile->socket.sk->sk_filter &&
- sk_filter(tfile->socket.sk, skb))
+ sk_filter(tfile->socket.sk, skb)) {
+ drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
goto drop;
+ }
len = run_ebpf_filter(tun, skb, len);
- if (len == 0 || pskb_trim(skb, len))
+ if (len == 0) {
+ drop_reason = SKB_DROP_REASON_TAP_FILTER;
goto drop;
+ }
+
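+ /* Trimming a nonlinear skb may have to allocate, so a failure here
+ * is an out-of-memory condition, not a filter verdict. */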
+ if (pskb_trim(skb, len)) {
+ drop_reason = SKB_DROP_REASON_NOMEM;
+ goto drop;
+ }
- if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
+ if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
goto drop;
+ }
skb_tx_timestamp(skb);
nf_reset_ct(skb);
- if (ptr_ring_produce(&tfile->tx_ring, skb))
+ if (ptr_ring_produce(&tfile->tx_ring, skb)) {
+ drop_reason = SKB_DROP_REASON_FULL_RING;
goto drop;
+ }
/* NETIF_F_LLTX requires to do our own update of trans_start */
queue = netdev_get_tx_queue(dev, txq);
return NETDEV_TX_OK;
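+ /* Common drop path: count the drop in the per-CPU core stats and
+ * pass the reason to the kfree_skb tracepoint, where tools such as
+ * drop monitor can pick it up. */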
drop:
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
skb_tx_error(skb);
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
rcu_read_unlock();
return NET_XMIT_DROP;
}
void *frame = tun_xdp_to_ptr(xdp);
if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
break;
}
nxmit++;
trace_xdp_exception(tun->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
break;
}
u32 rxhash = 0;
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tfile);
+ enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
*/
skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
if (IS_ERR(skb)) {
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
return PTR_ERR(skb);
}
if (!skb)
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
if (frags)
mutex_unlock(&tfile->napi_mutex);
return PTR_ERR(skb);
if (err) {
err = -EFAULT;
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
drop:
- atomic_long_inc(&tun->dev->rx_dropped);
- kfree_skb(skb);
+ dev_core_stats_rx_dropped_inc(tun->dev);
+ kfree_skb_reason(skb, drop_reason);
if (frags) {
tfile->napi.skb = NULL;
mutex_unlock(&tfile->napi_mutex);
pi.proto = htons(ETH_P_IPV6);
break;
default:
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
kfree_skb(skb);
return -EINVAL;
}
case IFF_TAP:
if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
err = -ENOMEM;
+ drop_reason = SKB_DROP_REASON_HDR_TRUNC;
goto drop;
}
skb->protocol = eth_type_trans(skb, tun->dev);
if (unlikely(!(tun->dev->flags & IFF_UP))) {
err = -EIO;
rcu_read_unlock();
+ drop_reason = SKB_DROP_REASON_DEV_READY;
goto drop;
}
skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
napi_free_frags(&tfile->napi);
rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
tun_rx_batched(tun, tfile, skb, more);
} else {
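+ /* netif_rx() may now be called from any context; netif_rx_ni() was
+ * folded into it and removed. */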
- netif_rx_ni(skb);
+ netif_rx(skb);
}
rcu_read_unlock();
struct virtio_net_hdr *gso = &hdr->gso;
struct bpf_prog *xdp_prog;
struct sk_buff *skb = NULL;
+ struct sk_buff_head *queue;
u32 rxhash = 0, act;
int buflen = hdr->buflen;
- int err = 0;
+ int ret = 0;
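+ /* ret doubles as a status code: negative on error, 0 when the
+ * buffer was consumed here, positive when an skb was queued for
+ * the NAPI poller. */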
bool skb_xdp = false;
struct page *page;
xdp_set_data_meta_invalid(xdp);
act = bpf_prog_run_xdp(xdp_prog, xdp);
- err = tun_xdp_act(tun, xdp_prog, xdp, act);
- if (err < 0) {
+ ret = tun_xdp_act(tun, xdp_prog, xdp, act);
+ if (ret < 0) {
put_page(virt_to_head_page(xdp->data));
- return err;
+ return ret;
}
- switch (err) {
+ switch (ret) {
case XDP_REDIRECT:
*flush = true;
fallthrough;
build:
skb = build_skb(xdp->data_hard_start, buflen);
if (!skb) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
- err = -EINVAL;
+ ret = -EINVAL;
goto out;
}
skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
- err = do_xdp_generic(xdp_prog, skb);
- if (err != XDP_PASS)
+ ret = do_xdp_generic(xdp_prog, skb);
+ if (ret != XDP_PASS) {
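+ /* Don't report the XDP verdict as a queued-skb count. */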
+ ret = 0;
goto out;
+ }
}
if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
!tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
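+
+ /* Under NAPI the skb is batched onto sk_write_queue and ret reports
+ * it as queued, so the caller can schedule the poller once for the
+ * whole burst; otherwise hand it to the stack directly as before. */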
- netif_receive_skb(skb);
+ if (tfile->napi_enabled) {
+ queue = &tfile->sk.sk_write_queue;
+ spin_lock(&queue->lock);
+ __skb_queue_tail(queue, skb);
+ spin_unlock(&queue->lock);
+ ret = 1;
+ } else {
+ netif_receive_skb(skb);
+ ret = 0;
+ }
/* No need to disable preemption here since this function is
* always called with bh disabled
tun_flow_update(tun, rxhash, tfile);
out:
- return err;
+ return ret;
}
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
if (!tun)
return -EBADFD;
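+ /* Only trust ctl if userspace passed a control block of exactly the
+ * expected size; a short msg_controllen must never reach ctl->type. */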
- if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
struct tun_page tpage;
int n = ctl->num;
- int flush = 0;
+ int flush = 0, queued = 0;
memset(&tpage, 0, sizeof(tpage));
for (i = 0; i < n; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
- tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
+ ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
+ if (ret > 0)
+ queued += ret;
}
if (flush)
xdp_do_flush();
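+ /* One NAPI kick per sendmsg batch for the skbs tun_xdp_one() queued. */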
+ if (tfile->napi_enabled && queued > 0)
+ napi_schedule(&tfile->napi);
+
rcu_read_unlock();
local_bh_enable();