// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

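/* A umem is a region of user-space memory, registered through the
 * XDP_UMEM_REG setsockopt, that is divided into equally sized chunks.
 * The kernel pins these pages and uses them as the packet buffer pool
 * for AF_XDP sockets, in either copy or zero-copy mode.
 */
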
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

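/* Each umem keeps a list of the AF_XDP sockets bound to it. Additions and
 * deletions are serialized by xsk_list_lock; readers walk the list under RCU.
 */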
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_del_rcu(&xs->list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

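/* Bind the umem to a device and queue. If the driver implements ndo_bpf and
 * ndo_xsk_async_xmit, zero-copy mode is negotiated via XDP_QUERY_XSK_UMEM and
 * XDP_SETUP_XSK_UMEM; otherwise, or if setup fails, we fall back to copy mode
 * unless XDP_ZEROCOPY was explicitly requested.
 */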
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u32 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err;

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (force_copy)
		return 0;

	dev_hold(dev);

	if (dev->netdev_ops->ndo_bpf && dev->netdev_ops->ndo_xsk_async_xmit) {
		bpf.command = XDP_QUERY_XSK_UMEM;

		rtnl_lock();
		err = dev->netdev_ops->ndo_bpf(dev, &bpf);
		rtnl_unlock();

		if (err) {
			dev_put(dev);
			return force_zc ? -ENOTSUPP : 0;
		}

		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = umem;
		bpf.xsk.queue_id = queue_id;

		rtnl_lock();
		err = dev->netdev_ops->ndo_bpf(dev, &bpf);
		rtnl_unlock();

		if (err) {
			dev_put(dev);
			return force_zc ? err : 0; /* fail or fallback */
		}

		umem->dev = dev;
		umem->queue_id = queue_id;
		umem->zc = true;
		return 0;
	}

	dev_put(dev);
	return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
}

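/* Undo xdp_umem_assign_dev(): tell the driver to drop its umem state for the
 * queue by passing a NULL umem in XDP_SETUP_XSK_UMEM, then release the device.
 */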
static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	if (umem->dev) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		rtnl_lock();
		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
		rtnl_unlock();

		if (err)
			WARN(1, "failed to disable umem!\n");

		dev_put(umem->dev);
		umem->dev = NULL;
	}
}

static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

		set_page_dirty_lock(page);
		put_page(page);
	}

	kfree(umem->pgs);
	umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

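/* Final teardown, run once the last reference is gone: detach from the
 * driver, destroy the fill and completion queues, unpin the user pages and
 * return the memlock accounting.
 */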
static void xdp_umem_release(struct xdp_umem *umem)
{
	struct task_struct *task;
	struct mm_struct *mm;

	xdp_umem_clear_dev(umem);

	xskq_destroy(umem->fq);
	umem->fq = NULL;
	xskq_destroy(umem->cq);
	umem->cq = NULL;

	xdp_umem_unpin_pages(umem);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (task) {
		mm = get_task_mm(task);
		put_task_struct(task);
		if (mm)
			mmput(mm);
	}

	kfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

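/* Pin the user-space pages backing the umem so they stay resident while the
 * kernel, and potentially the NIC, accesses them.
 */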
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags, &umem->pgs[0], NULL);
	up_write(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs < 0) {
			kfree(umem->pgs);
			umem->pgs = NULL;
			return npgs;
		}
		/* Only some pages were pinned; release them again. */
		umem->npgs = npgs;
		xdp_umem_unpin_pages(umem);
		return -ENOMEM;
	}
	return 0;
}

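/* Charge the pinned pages against the user's RLIMIT_MEMLOCK. The cmpxchg loop
 * updates locked_vm without taking a lock; CAP_IPC_LOCK bypasses the limit.
 */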
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

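/* Validate an XDP_UMEM_REG request and set up the umem: the chunk size must
 * be a power of two between XDP_UMEM_MIN_CHUNK_SIZE and PAGE_SIZE, the memory
 * area must be page aligned, and the headroom must leave room for
 * XDP_PACKET_HEADROOM in every chunk.
 */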
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err, i;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	chunks_per_page = PAGE_SIZE / chunk_size;
	if (chunks < chunks_per_page || chunks % chunks_per_page)
		return -EINVAL;

	headroom = ALIGN(headroom, 64);

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->pid = get_task_pid(current, PIDTYPE_PID);
	umem->address = (unsigned long)addr;
	umem->props.chunk_mask = ~((u64)chunk_size - 1);
	umem->props.size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		goto out;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_pin;
	}

	for (i = 0; i < umem->npgs; i++)
		umem->pages[i].addr = page_address(umem->pgs[i]);

	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
out:
	put_pid(umem->pid);
	return err;
}

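/* Allocate a umem and register the user memory described by @mr. Returns a
 * umem holding a single reference on success, or an ERR_PTR() on failure.
 */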
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = xdp_umem_reg(umem, mr);
	if (err) {
		kfree(umem);
		return ERR_PTR(err);
	}
	return umem;
}

bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}

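/* Rough lifecycle, as driven from the AF_XDP socket code (net/xdp/xsk.c):
 *
 *	umem = xdp_umem_create(&mr);			XDP_UMEM_REG setsockopt
 *	xdp_umem_assign_dev(umem, dev, queue_id, flags);	bind()
 *	xdp_add_sk_umem(umem, xs);
 *	...packets flow via the fill/completion/rx/tx rings...
 *	xdp_del_sk_umem(umem, xs);
 *	xdp_put_umem(umem);				socket release
 *
 * This is only a sketch of the expected call order, not a contract.
 */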