/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK 256
#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE 4096
#define ICE_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
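
/* Worked example (illustrative, not part of the original header): with the 4K
 * read request size above, ICE_MAX_DATA_PER_TXD_ALIGNED works out to
 * ~0xFFF & 0x3FFF = 0x3000 = 12288 bytes, i.e. the largest 4K-aligned payload
 * a single Tx descriptor is allowed to carry.
 */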

#define ICE_MAX_TXQ_PER_TXQG 128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room. From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Figure out the size of half page based on given buffer length and
 * then subtract the skb_shared_info followed by subtraction of the
 * actual buffer length; this in turn results in the actual space that
 * is left for padding usage
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}
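
/* Worked example (illustrative, assuming 4K pages and a 64-byte cache line,
 * where SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) comes to 320 bytes):
 * ice_compute_pad(1536) = SKB_WITH_OVERHEAD(ALIGN(1536, 2048)) - 1536
 *		       = (2048 - 320) - 1536 = 192 bytes left for padding.
 */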

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
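
/* For reference (illustrative): on PAGE_SIZE >= 8192 builds ICE_SKB_PAD is
 * simply NET_SKB_PAD + NET_IP_ALIGN, e.g. 64 + 2 = 66 bytes on an arch with
 * 64-byte cache lines and NET_IP_ALIGN == 2.
 */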

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES 64
#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
				  sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC 1
#define ICE_DESCS_FOR_SKB_DATA_PTR 1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
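
/* Worked example (illustrative): struct ice_tx_desc is 16 bytes, so a 64-byte
 * cache line holds 4 descriptors; with the default MAX_SKB_FRAGS of 17 this
 * gives DESC_NEEDED = 17 + 1 + 4 + 1 = 23 descriptors per worst-case skb.
 */
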
#define ICE_DESC_UNUSED(R) \
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)
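
/* Example (illustrative): on a 512-entry ring with next_to_clean == 10 and
 * next_to_use == 500, the count wraps in: (512 + 10) - 500 - 1 = 21 unused
 * slots; with next_to_clean == 500 and next_to_use == 10: 500 - 10 - 1 = 489.
 */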

#define ICE_RING_QUARTER(R) ((R)->count >> 2)

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
 * freed instead of returned like skb packets.
 */
#define ICE_TX_FLAGS_DUMMY_PKT	BIT(3)
#define ICE_TX_FLAGS_TSYN	BIT(4)
#define ICE_TX_FLAGS_IPV4	BIT(5)
#define ICE_TX_FLAGS_IPV6	BIT(6)
#define ICE_TX_FLAGS_TUNNEL	BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(8)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16
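
/* Example (illustrative): a VLAN TCI of 0x2005 rides in the upper 16 bits of
 * tx_flags:
 *
 *	tx_flags |= 0x2005 << ICE_TX_FLAGS_VLAN_S;
 *
 * and its 3-bit priority reads back as
 * (tx_flags & ICE_TX_FLAGS_VLAN_PR_M) >> ICE_TX_FLAGS_VLAN_PR_S == 1.
 */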

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
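
/* Illustrative arithmetic: 14 (Ethernet header) + 4 (FCS) + 2 * 4 (two VLAN
 * tags) = 26 bytes of worst-case L2 header overhead per packet.
 */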

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf; /* used for XDP */
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_tx_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	u16 pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers or more generally anywhere in the manual
 * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160 /* 0x1FE0 */
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & ICE_ITR_MASK)
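
/* Example (illustrative): the 2 us granularity means only even settings are
 * representable; ITR_REG_ALIGN(51) == (51 & 0x1FFE) == 50.
 */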

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)
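
/* Usage sketch (illustrative, not part of this header): the wr32() register
 * accessor, the GLINT_DYN_CTL() register macro and the hw/q_vector names are
 * assumed from the driver's usual conventions:
 *
 *	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
 *	     ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_ITR_NONE));
 *
 * requests immediate write-back (0 us latency) without updating any ITR
 * interval and without re-enabling the interrupt.
 */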

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_rx_buf *rx_buf;
		struct xdp_buff **xdp_buf;
	};
	/* CL2 - 2nd cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
	/* CL3 - 3rd cacheline starts here */
	u16 q_index;			/* Queue number of ring */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;
	u16 rx_offset;
	u16 rx_buf_len;

	/* stats structs */
	struct ice_rxq_stats rx_stats;
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;

	struct rcu_head rcu;		/* to avoid race on free */
	/* CL4 - 4th cacheline starts here */
	struct ice_channel *ch;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;
	dma_addr_t dma;			/* physical address of ring */
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
	u8 dcb_tc;			/* Traffic class of ring */
	u8 flags;
} ____cacheline_internodealigned_in_smp;

struct ice_tx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_tx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	u8 __iomem *tail;
	struct ice_tx_buf *tx_buf;
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	/* CL2 - 2nd cacheline starts here */
	dma_addr_t dma;			/* physical address of ring */
	struct xsk_buff_pool *xsk_pool;
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_rs;
	u16 next_dd;
	u16 q_handle;			/* Queue handle per TC */
	u16 reg_idx;			/* HW register index of the ring */
	u16 count;			/* Number of descriptors */
	u16 q_index;			/* Queue number of ring */
	/* stats structs */
	struct ice_txq_stats tx_stats;
	/* CL3 - 3rd cacheline starts here */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	struct rcu_head rcu;		/* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
	struct ice_channel *ch;
	struct ice_ptp_tx *tx_tstamps;
	u32 txq_teid;			/* Added Tx queue TEID */
	/* CL4 - 4th cacheline starts here */
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1	BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2	BIT(2)
	u8 flags;
	u8 dcb_tc;			/* Traffic class of ring */
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
	return !!ring->ch;
}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	union {
		struct ice_rx_ring *rx_ring;
		struct ice_tx_ring *tx_ring;
	};
	struct dim dim;		/* data for net_dim algorithm */
	u16 itr_idx;		/* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	union {
		struct {
			u16 itr_setting:13;
			u16 itr_reserved:2;
			u16 itr_mode:1;
		};
		u16 itr_settings;
	};
	enum ice_container_type type;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)
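
/* Usage sketch (illustrative; 'q_vector->tx' as the Tx container and the
 * stats fields follow the driver's conventions, ignoring u64_stats syncing):
 *
 *	struct ice_tx_ring *ring;
 *	u64 pkts = 0;
 *
 *	ice_for_each_tx_ring(ring, q_vector->tx)
 *		pkts += ring->stats.pkts;
 */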

static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
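
/* Example (illustrative): with 4K pages and rx_buf_len == ICE_RXBUF_3072,
 * 3072 > 2048 so ice_rx_pg_order() returns 1 and ice_rx_pg_size() is 8192:
 * each Rx buffer is backed by an order-1 page pair.
 */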

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */