// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

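/* Sockets with a Tx ring are kept on the umem's xsk_tx_list so the
 * transmit path can walk them under rcu_read_lock(). Additions and
 * removals are serialized by xsk_tx_list_lock.
 */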
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_tx_list);
	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->list);
	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
}

/* The umem is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
			       u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = umem;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = umem;

	return 0;
}

struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
				       u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].umem;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].umem;

	return NULL;
}
EXPORT_SYMBOL(xdp_get_umem_from_qid);

static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = NULL;
}

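/* Bind the umem to one queue of a device. Caller holds rtnl_lock.
 * Copy mode (XDP_COPY) only needs the per-queue pointers set up; for
 * zero-copy the driver must additionally accept the umem via ndo_bpf and
 * provide ndo_xsk_wakeup. Unless XDP_ZEROCOPY was explicitly requested,
 * a driver that cannot do zero-copy makes us fall back to copy mode.
 */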
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xdp_get_umem_from_qid(dev, queue_id))
		return -EBUSY;

	err = xdp_reg_umem_at_qid(dev, umem, queue_id);
	if (err)
		return err;

	umem->dev = dev;
	umem->queue_id = queue_id;

	if (flags & XDP_USE_NEED_WAKEUP) {
		umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
		/* Tx needs to be explicitly woken up the first time.
		 * Also for supporting drivers that do not implement this
		 * feature. They will always have to call sendto().
		 */
		xsk_set_tx_need_wakeup(umem);
	}

	dev_hold(dev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_umem;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_unreg_umem;

	umem->zc = true;
	return 0;

err_unreg_umem:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err)
		xdp_clear_umem_at_qid(dev, queue_id);
	return err;
}

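/* Undo xdp_umem_assign_dev(): tell a zero-copy driver to drop the umem,
 * clear the per-queue pointers and release the device reference. Caller
 * holds rtnl_lock.
 */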
void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (!umem->dev)
		return;

	if (umem->zc) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
		if (err)
			WARN(1, "failed to disable umem!\n");
	}

	xdp_clear_umem_at_qid(umem->dev, umem->queue_id);

	dev_put(umem->dev);
	umem->dev = NULL;
	umem->zc = false;
}

static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

static void xdp_umem_release(struct xdp_umem *umem)
{
	rtnl_lock();
	xdp_umem_clear_dev(umem);
	rtnl_unlock();

	ida_simple_remove(&umem_ida, umem->id);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xp_destroy(umem->pool);
	xdp_umem_unpin_pages(umem);

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

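/* Tearing the umem down takes rtnl_lock and may sleep, so the final
 * xdp_put_umem() defers the actual release to a workqueue instead of
 * running it in whatever context dropped the last reference.
 */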
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

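/* Pin the user memory long term (FOLL_LONGTERM) so the pages backing the
 * umem cannot be migrated or reclaimed while packet buffers point at
 * them. A partial pin is undone and reported as an error.
 */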
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	npgs = pin_user_pages(address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	mmap_read_unlock(current->mm);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			/* Handle partially pinned pages. */
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

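/* Charge the pinned pages against the owner's RLIMIT_MEMLOCK unless the
 * task has CAP_IPC_LOCK. The cmpxchg loop updates user->locked_vm without
 * taking a lock, retrying if it raced with another updater.
 */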
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

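/* Validate the user-supplied memory region and take over its pages.
 * This is driven from user space by the XDP_UMEM_REG setsockopt on an
 * AF_XDP socket; roughly (illustrative only, not part of this file):
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)buf,	// page-aligned area
 *		.len = buf_size,		// length of the area in bytes
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	fd = socket(AF_XDP, SOCK_RAW, 0);
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 */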
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	u64 npgs, addr = mr->addr, size = mr->len;
	unsigned int chunks, chunks_per_page;
	int err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG |
			XDP_UMEM_USES_NEED_WAKEUP))
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	npgs = size >> PAGE_SHIFT;
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks) {
		chunks_per_page = PAGE_SIZE / chunk_size;
		if (chunks < chunks_per_page || chunks % chunks_per_page)
			return -EINVAL;
	}

	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size = chunk_size;
	umem->npgs = (u32)npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;
	INIT_LIST_HEAD(&umem->xsk_tx_list);
	spin_lock_init(&umem->xsk_tx_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem, (unsigned long)addr);
	if (err)
		goto out_account;

	umem->pool = xp_create(umem->pgs, umem->npgs, chunks, chunk_size,
			       headroom, size, unaligned_chunks);
	if (!umem->pool) {
		err = -ENOMEM;
		goto out_pin;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

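/* Allocate a umem, give it an id from umem_ida and register the user
 * memory described by @mr. Returns the umem or an ERR_PTR() on failure.
 */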
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

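/* Both a fill ring and a completion ring must have been created (via the
 * XDP_UMEM_FILL_RING and XDP_UMEM_COMPLETION_RING setsockopts) before the
 * umem can be bound to a device.
 */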
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}