/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK 256
#define ICE_RXBUF_2048 2048
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE 4096
#define ICE_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)

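/* Worked example (illustrative, not from the original source): with
 * ICE_MAX_READ_REQ_SIZE = 4096, ~(4096 - 1) = ~0xFFF, so
 * ICE_MAX_DATA_PER_TXD_ALIGNED = 0x3FFF & ~0xFFF = 0x3000 (12288 bytes).
 * A worst-case 64KB TSO payload would thus need
 * DIV_ROUND_UP(65536, 12288) = 6 data descriptors.
 */
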
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES 64
#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
				  sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC 1
#define ICE_DESCS_FOR_SKB_DATA_PTR 1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)

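/* Illustrative arithmetic, assuming the 64B cache line above and the
 * 16B ice_tx_desc (two 8-byte quadwords): ICE_DESCS_PER_CACHE_LINE is
 * 64 / 16 = 4, so with the common MAX_SKB_FRAGS of 17 the worst case is
 * 17 + 1 + 4 + 1 = 23 descriptors for a single skb (frags, context
 * descriptor, cache-line slack, and the skb->data pointer).
 */
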
#define ICE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)

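/* Example with illustrative values: on a 512-descriptor ring with
 * next_to_clean = 10 and next_to_use = 500, next_to_clean is not greater
 * than next_to_use, so count is added back: 512 + 10 - 500 - 1 = 21
 * unused descriptors. The trailing -1 keeps next_to_use from ever
 * catching next_to_clean, which would be indistinguishable from an
 * empty ring.
 */
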
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
#define ICE_TX_FLAGS_VLAN_S 16

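/* Minimal sketch (hypothetical snippet, not driver code) of how the VLAN
 * masks compose: the 16-bit VLAN TCI occupies the upper half of tx_flags
 * and the 3-bit priority overlays the TCI's top bits:
 *
 *	tx_flags |= ((u32)vlan_tci << ICE_TX_FLAGS_VLAN_S) &
 *		    ICE_TX_FLAGS_VLAN_M;
 *	prio = (tx_flags & ICE_TX_FLAGS_VLAN_PR_M) >> ICE_TX_FLAGS_VLAN_PR_S;
 */
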
#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	u16 pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT = 0,
	ICE_RX_DTYPE_HEADER_SPLIT = 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_8K 124
#define ICE_ITR_20K 50
#define ICE_ITR_MAX 8160
#define ICE_DFLT_TX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
#define ICE_DFLT_RX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)

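/* Worked example (illustrative): for a user setting of 53 usecs with the
 * dynamic flag set, i.e. setting = 53 | ICE_ITR_DYNAMIC:
 *	ITR_IS_DYNAMIC(setting) -> true
 *	ITR_TO_REG(setting)     -> 53
 *	ITR_REG_ALIGN(53)       -> 54, rounded up to the 2 usec granularity
 *				   and masked to the register field
 */
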
#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002
#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002
#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA
#define ICE_ITR_ADAPTIVE_LATENCY 0x8000
#define ICE_ITR_ADAPTIVE_BULK 0x0000

#define ICE_DFLT_INTRL 0
#define ICE_MAX_INTRL 236

#define ICE_WB_ON_ITR_USECS 2
#define ICE_IN_WB_ON_ITR_MODE 255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)

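/* Usage sketch (illustrative; the real call sites live in the .c files):
 * with the vector's interrupt already masked, write-back-on-ITR mode can
 * be entered with something like
 *
 *	wr32(&pf->hw, GLINT_DYN_CTL(q_vector->reg_idx),
 *	     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, ICE_RX_ITR));
 */
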
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
#define ICE_TX_LEGACY 1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	/* CL2 - 2nd cacheline starts here */
	u16 q_index;			/* Queue number of ring */
	u16 q_handle;			/* Queue handle per TC */

	u8 ring_active:1;		/* is ring online or not */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	/* stats structs */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	struct rcu_head rcu;		/* to avoid race on free */
	/* CLX - the below items are only accessed infrequently and should be
	 * in their own cache line if possible
	 */
	dma_addr_t dma;			/* physical address of ring */
	unsigned int size;		/* length of descriptor ring in bytes */
	u32 txq_teid;			/* Added Tx queue TEID */
	u16 rx_buf_len;
#ifdef CONFIG_DCB
	u8 dcb_tc;			/* Traffic class of ring */
#endif /* CONFIG_DCB */
} ____cacheline_internodealigned_in_smp;

struct ice_ring_container {
	/* head of linked-list of rings */
	struct ice_ring *ring;
	unsigned long next_update;	/* jiffies value of next queue update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_pkts;	/* total packets processed this int */
	u16 itr_idx;		/* index in the interrupt vector */
	u16 target_itr;		/* value in usecs divided by the hw->itr_gran */
	u16 current_itr;	/* value in usecs divided by the hw->itr_gran */
	/* high bit set means dynamic ITR, rest is used to store user
	 * readable ITR value in usecs and must be converted before programming
	 * to a register.
	 */
	u16 itr_setting;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)

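/* Usage sketch (illustrative): walk every Tx ring hanging off a vector's
 * ring container and total the byte counts:
 *
 *	struct ice_ring *ring;
 *	u64 bytes = 0;
 *
 *	ice_for_each_ring(ring, q_vector->tx)
 *		bytes += ring->stats.bytes;
 */
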
bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);

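/* Expected pairing of the declarations above, inferred from their names
 * (actual call sites live in the driver's .c files): ice_setup_tx_ring()
 * and ice_setup_rx_ring() allocate descriptor memory, ice_clean_tx_ring()
 * and ice_clean_rx_ring() release in-flight buffers on stop, and the
 * ice_free_*_ring() helpers undo the setup. ice_alloc_rx_bufs() refills
 * Rx descriptors as ice_napi_poll() consumes them.
 */
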
#endif /* _ICE_TXRX_H_ */