 	skb_queue_purge(&tfile->sk.sk_error_queue);
 }
 
-static void tun_cleanup_tx_ring(struct tun_file *tfile)
-{
-	if (tfile->tx_ring.queue) {
-		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
-		xdp_rxq_info_unreg(&tfile->xdp_rxq);
-		memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-	}
-}
-
 static void __tun_detach(struct tun_file *tfile, bool clean)
 {
 	struct tun_file *ntfile;
 			    tun->dev->reg_state == NETREG_REGISTERED)
 				unregister_netdevice(tun->dev);
 		}
-		tun_cleanup_tx_ring(tfile);
+		if (tun)
+			xdp_rxq_info_unreg(&tfile->xdp_rxq);
 		sock_put(&tfile->sk);
 	}
 }
 		tun_napi_del(tun, tfile);
 		/* Drop read queue */
 		tun_queue_purge(tfile);
+		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 		sock_put(&tfile->sk);
-		tun_cleanup_tx_ring(tfile);
 	}
 	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
 		tun_enable_queue(tfile);
 		tun_queue_purge(tfile);
+		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 		sock_put(&tfile->sk);
-		tun_cleanup_tx_ring(tfile);
 	}
 	BUG_ON(tun->numdisabled != 0);
 }
 	if (!tfile->detached &&
-	    ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
+	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
+			    GFP_KERNEL, tun_ptr_free)) {
 		err = -ENOMEM;
 		goto out;
 	}
 					    &tun_proto, 0);
 	if (!tfile)
 		return -ENOMEM;
+	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
+		sk_free(&tfile->sk);
+		return -ENOMEM;
+	}
+
 	RCU_INIT_POINTER(tfile->tun, NULL);
 	tfile->flags = 0;
 	tfile->ifindex = 0;
 
 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 
-	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-
 	return 0;
 }
 	struct tun_file *tfile = file->private_data;
 
 	tun_detach(tfile, true);
+	ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
 
 	return 0;
 }
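Taken together, the tx_ring now follows the lifetime of the tun_file rather than of the attached queue: it is created empty in tun_chr_open(), grown in tun_attach(), and destroyed exactly once in tun_chr_close(), after tun_detach() has run, so a reader racing with a detach can no longer hit a freed ring. A hypothetical skeleton of that ownership model (illustrative names, not the tun code):

#include <linux/ptr_ring.h>
#include <linux/slab.h>

struct demo_file {
	struct ptr_ring ring;
};

/* Stand-in for tun_ptr_free(). */
static void demo_free_entry(void *ptr)
{
	kfree(ptr);
}

/* open(): the ring exists from the start but holds nothing. */
static int demo_open(struct demo_file *f)
{
	return ptr_ring_init(&f->ring, 0, GFP_KERNEL);
}

/* attach: grow the ring to the device's queue length. */
static int demo_attach(struct demo_file *f, unsigned int qlen)
{
	return ptr_ring_resize(&f->ring, qlen, GFP_KERNEL, demo_free_entry);
}

/* release(): no producer or consumer can reach the ring any more, so
 * freeing leftover entries and the ring itself is safe here.
 */
static void demo_close(struct demo_file *f)
{
	ptr_ring_cleanup(&f->ring, demo_free_entry);
}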