/*
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_LL_POLL_H
#define _LINUX_NET_LL_POLL_H

#include <linux/netdevice.h>
#include <net/ip.h>

#ifdef CONFIG_NET_LL_RX_POLL

extern unsigned int sysctl_net_ll_read __read_mostly;
extern unsigned int sysctl_net_ll_poll __read_mostly;

/* return values from ndo_ll_poll */
#define LL_FLUSH_FAILED		-1
#define LL_FLUSH_BUSY		-2

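/* Illustrative only: a sketch of the contract a driver's ndo_ll_poll
 * implementation is expected to follow. The my_* types and helpers are
 * hypothetical; real implementations (e.g. in ixgbe) are driver specific.
 */
#if 0
static int my_ndo_ll_poll(struct napi_struct *napi)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	int cleaned;

	if (!netif_running(ring->netdev))
		return LL_FLUSH_FAILED;		/* permanent failure */

	if (!my_ring_trylock(ring))
		return LL_FLUSH_BUSY;		/* irq path owns the ring, retry later */

	cleaned = my_clean_rx_irq(ring);	/* >= 0: packets flushed to the stack */
	my_ring_unlock(ring);
	return cleaned;
}
#endif
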
/* a wrapper to make debug_smp_processor_id() happy
 * we can use sched_clock() because we don't care much about precision
 * we only care that the average is bounded
 */
#ifdef CONFIG_DEBUG_PREEMPT
static inline u64 ll_sched_clock(void)
{
	u64 rc;

	preempt_disable_notrace();
	rc = sched_clock();
	preempt_enable_no_resched_notrace();

	return rc;
}
#else /* CONFIG_DEBUG_PREEMPT */
static inline u64 ll_sched_clock(void)
{
	return sched_clock();
}
#endif /* CONFIG_DEBUG_PREEMPT */

/* we don't mind a ~2.5% imprecision so <<10 instead of *1000
 * sk->sk_ll_usec is a u_int so this can't overflow
 */
static inline u64 ll_sk_end_time(struct sock *sk)
{
	return ((u64)ACCESS_ONCE(sk->sk_ll_usec) << 10) + ll_sched_clock();
}

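/* Worked example (illustrative): with sk->sk_ll_usec == 50 the budget is
 * 50 << 10 = 51200ns of sched_clock() time, versus the exact
 * 50 * 1000 = 50000ns, so the poll window runs ~2.4% long. That is fine
 * here, since only the bounded average matters.
 */
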
/* in poll/select we use the global sysctl_net_ll_poll value */
static inline u64 ll_end_time(void)
{
	return ((u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10) + ll_sched_clock();
}

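/* Illustrative only: roughly how a poll()/select() loop could bound its
 * busy-wait with the ll_end_time()/can_poll_ll() pair. check_for_events()
 * is a hypothetical stand-in for the real file->f_op->poll walk.
 */
#if 0
static bool busy_poll_sketch(void)
{
	u64 end_time = ll_end_time();	/* budget from sysctl_net_ll_poll */
	bool found;

	do {
		found = check_for_events();
	} while (!found && can_poll_ll(end_time));

	return found;
}
#endif
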
static inline bool sk_valid_ll(struct sock *sk)
{
	return sk->sk_ll_usec && sk->sk_napi_id &&
	       !need_resched() && !signal_pending(current);
}

static inline bool can_poll_ll(u64 end_time)
{
	return !time_after64(ll_sched_clock(), end_time);
}

/* when used in sock_poll() nonblock is known at compile time to be true
 * so the loop and end_time will be optimized out
 */
static inline bool sk_poll_ll(struct sock *sk, int nonblock)
{
	u64 end_time = nonblock ? 0 : ll_sk_end_time(sk);
	const struct net_device_ops *ops;
	struct napi_struct *napi;
	int rc = false;

	/*
	 * rcu read lock for napi hash
	 * bh so we don't race with net_rx_action
	 */
	rcu_read_lock_bh();

	napi = napi_by_id(sk->sk_napi_id);
	if (!napi)
		goto out;

	ops = napi->dev->netdev_ops;
	if (!ops->ndo_ll_poll)
		goto out;

	do {
		rc = ops->ndo_ll_poll(napi);

		if (rc == LL_FLUSH_FAILED)
			break; /* permanent failure */

		if (rc > 0)
			/* local bh are disabled so it is ok to use _BH */
			NET_ADD_STATS_BH(sock_net(sk),
					 LINUX_MIB_LOWLATENCYRXPACKETS, rc);

	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
		 can_poll_ll(end_time));

	rc = !skb_queue_empty(&sk->sk_receive_queue);
out:
	rcu_read_unlock_bh();

	return rc;
}

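/* Illustrative only: the two call-site shapes the comment above
 * sk_poll_ll() refers to. Neither function exists in this header;
 * compute_event_mask() and wait_for_data() are hypothetical.
 */
#if 0
static unsigned int sock_poll_sketch(struct sock *sk)
{
	if (sk_valid_ll(sk))
		sk_poll_ll(sk, 1);	/* nonblock == 1: the loop compiles away */
	return compute_event_mask(sk);	/* hypothetical event-mask helper */
}

static int recv_sketch(struct sock *sk)
{
	if (sk_valid_ll(sk) && skb_queue_empty(&sk->sk_receive_queue))
		sk_poll_ll(sk, 0);	/* nonblock == 0: busy-poll up to the budget */
	return wait_for_data(sk);	/* hypothetical blocking fallback */
}
#endif
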
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
{
	skb->napi_id = napi->napi_id;
}

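/* Illustrative only: where a NIC driver's ring-cleaning routine would call
 * skb_mark_ll() so each skb records the NAPI context that delivered it.
 * my_ring and my_fetch_completed_skb() are hypothetical.
 */
#if 0
static int my_clean_rx_irq_sketch(struct my_ring *ring)
{
	struct sk_buff *skb;
	int cleaned = 0;

	while ((skb = my_fetch_completed_skb(ring)) != NULL) {
		skb_mark_ll(skb, &ring->napi);	/* tag before handing to the stack */
		napi_gro_receive(&ring->napi, skb);
		cleaned++;
	}
	return cleaned;
}
#endif
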
/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_napi_id = skb->napi_id;
}

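/* Illustrative only: a protocol receive path calls sk_mark_ll() once it has
 * matched the skb to a socket, so a later sk_poll_ll() knows which NAPI
 * instance to poll. proto_rcv_sketch() is hypothetical; upstream the call
 * sits in the tcp/udp receive functions.
 */
#if 0
static int proto_rcv_sketch(struct sock *sk, struct sk_buff *skb)
{
	sk_mark_ll(sk, skb);	/* remember the delivering napi_id */
	return sock_queue_rcv_skb(sk, skb);
}
#endif
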
#else /* CONFIG_NET_LL_RX_POLL */

static inline u64 ll_sk_end_time(struct sock *sk)
{
	return 0;
}

static inline u64 ll_end_time(void)
{
	return 0;
}

static inline bool sk_valid_ll(struct sock *sk)
{
	return false;
}

static inline bool sk_poll_ll(struct sock *sk, int nonblock)
{
	return false;
}

static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
{
}

static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
{
}

static inline bool can_poll_ll(u64 end_time)
{
	return false;
}

#endif /* CONFIG_NET_LL_RX_POLL */
#endif /* _LINUX_NET_LL_POLL_H */