/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;

struct xdp_umem {
	struct user_struct *user;
	struct list_head xsk_dma_list;
	struct work_struct work;
};

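/* Example (illustrative, not part of this header): user space registers
 * the memory backing a UMEM with the XDP_UMEM_REG setsockopt before any
 * rings are created. A minimal sketch, assuming "fd" is an AF_XDP socket
 * and "buf" is a page-aligned area of NUM_FRAMES * FRAME_SIZE bytes (all
 * three names are hypothetical):
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)buf,
 *		.len = NUM_FRAMES * FRAME_SIZE,
 *		.chunk_size = FRAME_SIZE,
 *		.headroom = 0,
 *	};
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 */
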
struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

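/* Example (illustrative, not part of this header): BPF programs reach
 * the sockets in an xsk_map through the XSKMAP map type. A minimal
 * sketch of an XDP program that redirects each packet to the AF_XDP
 * socket bound to its receive queue; the map name "xsks_map" and the
 * program name are hypothetical, and XDP_PASS is the fallback action
 * when no socket is bound at that index:
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_XSKMAP);
 *		__uint(max_entries, 64);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} xsks_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xsk_redirect_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index,
 *					XDP_PASS);
 *	}
 */
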
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	struct xsk_buff_pool *pool;

	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;

	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};

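/* Example (illustrative, not part of this header): user space reaches
 * this socket type through the normal socket API. A minimal sketch of
 * creating an AF_XDP socket and binding it to queue 0 of an interface;
 * error handling is elided, and the UMEM and ring setsockopt setup
 * must happen before bind():
 *
 *	#include <linux/if_xdp.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *	};
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */
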
#ifdef CONFIG_XDP_SOCKETS

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

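/* Note: the lockless READ_ONCE() above pairs with the WRITE_ONCE()
 * updates of m->xsk_map[] performed under the map's "Synchronize map
 * updates" spinlock, so the XDP redirect fast path can look up a
 * socket without taking that lock.
 */
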
#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */