/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64
#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port))
#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET 5
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)

/* Top Registers */
#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
#define MVPP2_DSA_EXTENDED BIT(5)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG 0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG 0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)

/* RSS Registers */
#define MVPP22_RSS_INDEX 0x1500
#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
#define MVPP22_RSS_TABLE_ENTRY 0x1508
#define MVPP22_RSS_TABLE 0x1510
#define MVPP22_RSS_TABLE_POINTER(p) (p)
#define MVPP22_RSS_WIDTH 0x150c
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
#define MVPP2_CLS_PORT_WAY_REG 0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG 0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
#define MVPP22_DESC_ADDR_OFFS 8
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
#define MVPP2_RXQ_NUM_NEW_OFFSET 16
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
#define MVPP2_RXQ_THRESH_REG 0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET 0
#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
#define MVPP2_RXQ_INDEX_REG 0x2050
#define MVPP2_TXQ_NUM_REG 0x2080
#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_TXQ_THRESH_REG 0x2094
#define MVPP2_TXQ_THRESH_OFFSET 16
#define MVPP2_TXQ_THRESH_MASK 0x3fff
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
#define MVPP2_TXQ_PENDING_REG 0x20a0
#define MVPP2_TXQ_PENDING_MASK 0x3fff
#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS 0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12

#define MVPP22_AXI_CODE_CACHE_OFFS 0
#define MVPP22_AXI_CODE_DOMAIN_OFFS 4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0

#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8

#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
#define MVPP2_BM_STOP_MASK BIT(1)
#define MVPP2_BM_STATE_MASK BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS 8
#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS 16
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG 0x8800
#define MVPP2_TX_PORT_FLUSH_REG 0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE 0x24
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT 6
#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS 7
#define MVPP2_GMAC_CTRL_2_REG 0x8
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_STATUS0 0x10
#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
					   MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_INT_STAT 0x20
#define MVPP22_GMAC_INT_STAT_LINK BIT(1)
#define MVPP22_GMAC_INT_MASK 0x24
#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_CTRL_4_REG 0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
#define MVPP22_GMAC_INT_SUM_MASK 0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL0_REG 0x100
#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
#define MVPP22_XLG_CTRL1_REG 0x104
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff
#define MVPP22_XLG_STATUS 0x10c
#define MVPP22_XLG_STATUS_LINK_UP BIT(0)
#define MVPP22_XLG_INT_STAT 0x114
#define MVPP22_XLG_INT_STAT_LINK BIT(1)
#define MVPP22_XLG_INT_MASK 0x118
#define MVPP22_XLG_INT_MASK_LINK BIT(1)
#define MVPP22_XLG_CTRL3_REG 0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
#define MVPP22_XLG_EXT_INT_MASK 0x15c
#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
#define MVPP22_XLG_CTRL4_REG 0x184
#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)

#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
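
/* Worked example (illustration only, not part of the driver): with a
 * 256-entry ring, q->last_desc is 255, so MVPP2_QUEUE_NEXT_DESC(q, 254)
 * yields 255 and MVPP2_QUEUE_NEXT_DESC(q, 255) wraps back to 0.
 */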
/* MPCS registers. PPv2.2 only */
#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL 0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
#define MVPP22_MPCS_CLK_RESET 0x14c
#define MAC_CLK_RESET_SD_TX BIT(0)
#define MAC_CLK_RESET_SD_RX BIT(1)
#define MAC_CLK_RESET_MAC BIT(2)
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)

/* XPCS registers. PPv2.2 only */
#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0 0x0
#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1 0x1108
#define GENCONF_SOFT_RESET1_GOP BIT(6)
#define GENCONF_PORT_CTRL0 0x1110
#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1)
#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29)
#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31)
#define GENCONF_PORT_CTRL1 0x1114
#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
#define GENCONF_CTRL0 0x1120
#define GENCONF_CTRL0_PORT0_RGMII BIT(0)
#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
#define GENCONF_CTRL0_PORT1_RGMII BIT(2)
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_TXDONE_COAL_USEC 1000
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 64
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVPP2_MH_SIZE 2
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4
#define MVPP2_VLAN_TAG_EDSA_LEN 8
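
/* Worked example (illustration only): on RX the buffer layout is
 * [2-byte MH][14-byte Ethernet header][IP header...], so the IP header
 * starts at offset MVPP2_MH_SIZE + ETH_HLEN = 2 + 14 = 16 bytes, a
 * multiple of 4, which gives the automatic IP header alignment
 * described above.
 */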
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE 9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000

#define MVPP2_TX_MTU_MAX 0x7ffff
/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT 16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8

/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
 * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data),
 * multiply this value by two to count the maximum number of skb descs needed.
 */
#define MVPP2_MAX_TSO_SEGS 300
#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
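
/* Worked example (illustration only): a TSO skb split into the maximum
 * MVPP2_MAX_TSO_SEGS = 300 segments needs at most one header descriptor
 * and one data descriptor per segment, i.e. 600 descriptors, plus up to
 * MAX_SKB_FRAGS descriptors for the remaining page fragments - exactly
 * what MVPP2_MAX_SKB_DESCS reserves.
 */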
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD_MAX 1024
#define MVPP2_MAX_RXD_DFLT 128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD_MAX 2048
#define MVPP2_MAX_TXD_DFLT 1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE 256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80

/* TX FIFO constants */
#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
#define MVPP2_TX_FIFO_THRESHOLD_MIN 256
#define MVPP2_TX_FIFO_THRESHOLD_10KB \
	(MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
#define MVPP2_TX_FIFO_THRESHOLD_3KB \
	(MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
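
/* Worked example (illustration only, assuming a 64-byte cache line):
 * for an MTU of 1500, MVPP2_RX_PKT_SIZE(1500) aligns
 * 1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1524
 * up to 1536. MVPP2_RX_BUF_SIZE() then adds NET_SKB_PAD headroom and
 * MVPP2_RX_TOTAL_SIZE() adds room for the skb_shared_info tail;
 * MVPP2_RX_MAX_PKT_SIZE() is the exact inverse of those two steps.
 */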
#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16

/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
#define MVPP2_PRS_SRAM_WORDS 4
#define MVPP2_PRS_FLOW_ID_SIZE 64
#define MVPP2_PRS_FLOW_ID_MASK 0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
#define MVPP2_PRS_IPV4_HEAD 0x40
#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
#define MVPP2_PRS_IPV4_IHL 0x5
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
#define MVPP2_PRS_IPV6_HOP_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
#define MVPP2_PRS_DBL_VLANS_MAX 100
#define MVPP2_PRS_CAST_MASK BIT(0)
#define MVPP2_PRS_MCAST_VAL BIT(0)
#define MVPP2_PRS_UCAST_VAL 0x0

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS 8
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
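
/* Worked example (illustration only): each 32-bit TCAM data word holds
 * two data bytes and their two enable bytes, so the macros above
 * interleave them: offs 0 -> byte 0 (enable byte 2), offs 1 -> byte 1
 * (enable byte 3), offs 2 -> byte 4 (enable byte 6), offs 3 -> byte 5
 * (enable byte 7), and so on.
 */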
#define MVPP2_PRS_TCAM_AI_BYTE 16
#define MVPP2_PRS_TCAM_PORT_BYTE 17
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5

#define MVPP2_PRS_VID_TCAM_BYTE 2
/* TCAM range for unicast and multicast filtering. We have 25 entries per port,
 * with 4 dedicated to UC filtering and the rest to multicast filtering.
 * Additionally we reserve one entry for the broadcast address, and one for
 * each port's own address.
 */
#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25
#define MVPP2_PRS_MAC_RANGE_SIZE 80

/* Number of entries per port dedicated to UC and MC filtering */
#define MVPP2_PRS_MAC_UC_FILT_MAX 4
#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \
				   MVPP2_PRS_MAC_UC_FILT_MAX)
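
/* Worked example (illustration only): out of the 25 filtering entries
 * available per port, MVPP2_PRS_MAC_UC_FILT_MAX = 4 are kept for
 * unicast addresses, leaving MVPP2_PRS_MAC_MC_FILT_MAX = 25 - 4 = 21
 * entries for multicast addresses.
 */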
/* There is a TCAM range reserved for VLAN filtering entries, range size is 33:
 * - 10 VLAN ID filter entries per port
 * - 1 default VLAN filter entry per port
 * It is assumed that there are 3 ports for filtering, not including the
 * loopback port.
 */
#define MVPP2_PRS_VLAN_FILT_MAX 11
#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33

#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2)
#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1)
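
/* Worked example (illustration only): each port gets
 * MVPP2_PRS_VLAN_FILT_MAX = 11 entries (10 VLAN IDs + 1 default), and
 * with 3 filtered ports the whole range is 3 * 11 = 33 entries, i.e.
 * MVPP2_PRS_VLAN_FILT_RANGE_SIZE.
 */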
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1

/* MAC filtering range */
#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1)
#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \
				  MVPP2_PRS_MAC_RANGE_SIZE + 1)
/* VLAN filtering range */
#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
				       MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22)
#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21)
#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20)
#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
/* reserved */
#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \
					((port) * MVPP2_PRS_VLAN_FILT_MAX))
#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
				       + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
/* Index of default vid filter for given port */
#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
				       + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)
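
/* Worked example (illustration only): for port 1 the VID filter entries
 * span MVPP2_PRS_VID_PORT_FIRST(1) = MVPP2_PE_VID_FILT_RANGE_START + 11
 * up to MVPP2_PRS_VID_PORT_LAST(1) = first + 9, with the port's default
 * entry at MVPP2_PRS_VID_PORT_DFLT(1) = first + 10.
 */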
/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS 0
#define MVPP2_PRS_SRAM_RI_WORD 0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
#define MVPP2_PRS_SRAM_AI_OFFS 90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
#define MVPP2_PRS_SRAM_AI_MASK 0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE 0x0
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST 0x0
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN 0x0
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST 0x0
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
#define MVPP2_PRS_RI_L4_TCP BIT(22)
#define MVPP2_PRS_RI_L4_UDP BIT(23)
#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED true
#define MVPP2_PRS_UNTAGGED false
#define MVPP2_PRS_EDSA true
#define MVPP2_PRS_DSA false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};
/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_VID,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
enum mvpp2_prs_l2_cast {
	MVPP2_PRS_L2_UNI_CAST,
	MVPP2_PRS_L2_MULTI_CAST,
};

enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64
#define MVPP2_CLS_RX_QUEUES 256

/* RSS constants */
#define MVPP22_RSS_TABLE_ENTRIES 32

/* BM constants */
#define MVPP2_BM_JUMBO_BUF_NUM 512
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24

#define MVPP2_BM_SHORT_FRAME_SIZE 512
#define MVPP2_BM_LONG_FRAME_SIZE 2048
#define MVPP2_BM_JUMBO_FRAME_SIZE 10240
/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)
#define MVPP21_ADDR_SPACE_SZ 0
#define MVPP22_ADDR_SPACE_SZ SZ_64K

#define MVPP2_MAX_THREADS 8
#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];
/* GMAC MIB Counters register definitions */
#define MVPP21_MIB_COUNTERS_OFFSET 0x1000
#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400
#define MVPP22_MIB_COUNTERS_OFFSET 0x0
#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100

#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0
#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8
#define MVPP2_MIB_CRC_ERRORS_SENT 0xc
#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10
#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18
#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c
#define MVPP2_MIB_FRAMES_64_OCTETS 0x20
#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24
#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28
#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c
#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30
#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38
#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40
#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48
#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c
#define MVPP2_MIB_FC_SENT 0x54
#define MVPP2_MIB_FC_RCVD 0x58
#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c
#define MVPP2_MIB_UNDERSIZE_RCVD 0x60
#define MVPP2_MIB_FRAGMENTS_RCVD 0x64
#define MVPP2_MIB_OVERSIZE_RCVD 0x68
#define MVPP2_MIB_JABBER_RCVD 0x6c
#define MVPP2_MIB_MAC_RCV_ERROR 0x70
#define MVPP2_MIB_BAD_CRC_EVENT 0x74
#define MVPP2_MIB_COLLISION 0x78
#define MVPP2_MIB_LATE_COLLISION 0x7c

#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each "software thread" can access the base
	 * register through a separate address space, each 64 KB apart
	 * from each other. Typically, such address spaces will be
	 * used per CPU.
	 */
	void __iomem *swth_base[MVPP2_MAX_THREADS];

	/* On PPv2.2, some port control registers are located in the system
	 * controller space. These registers are accessible through a regmap.
	 */
	struct regmap *sysctrl_base;

	/* List of pointers to port structures */
	struct mvpp2_port *port_list[MVPP2_MAX_PORTS];

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	/* Workqueue to gather hardware statistics */
	struct workqueue_struct *stats_queue;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_queue_vector {
	int irq;
	struct napi_struct napi;
	enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
	int sw_thread_id;
	u16 sw_thread_mask;
	int first_rxq;
	int nrxqs;
	u32 pending_cause_rx;
	struct mvpp2_port *port;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	struct mvpp2 *priv;

	/* Firmware node associated to the port */
	struct fwnode_handle *fwnode;
	/* Per-port registers' base address */
	void __iomem *base;
	void __iomem *stats_base;

	struct mvpp2_rx_queue **rxqs;
	unsigned int nrxqs;
	struct mvpp2_tx_queue **txqs;
	unsigned int ntxqs;
	struct net_device *dev;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	unsigned long flags;

	struct mvpp2_pcpu_stats __percpu *stats;

	/* Per-port work and its lock to gather hardware statistics */
	struct mutex gather_stats_lock;
	struct delayed_work stats_work;

	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int duplex;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
	unsigned int nqvecs;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT 0
#define MVPP2_TXD_IP_HLEN_SHIFT 8
#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
#define MVPP2_TXD_PADDING_DISABLE BIT(23)
#define MVPP2_TXD_L4_UDP BIT(24)
#define MVPP2_TXD_L3_IP6 BIT(26)
#define MVPP2_TXD_L_DESC BIT(28)
#define MVPP2_TXD_F_DESC BIT(29)

#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC BIT(21)
#define MVPP2_RXD_L4_CSUM_OK BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
#define MVPP2_RXD_L4_TCP BIT(25)
#define MVPP2_RXD_L4_UDP BIT(26)
#define MVPP2_RXD_L3_IP4 BIT(28)
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)
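
/* Minimal sketch (illustration only, not part of the driver): how the
 * RX status bits above are typically consumed when validating a
 * received descriptor. The helper name is hypothetical.
 */
static inline bool mvpp2_example_rx_desc_ok(u32 status)
{
	/* Reject descriptors with any error condition flagged */
	if (status & MVPP2_RXD_ERR_SUMMARY)
		return false;

	/* The L4 checksum result is only meaningful for TCP/UDP frames */
	if (status & (MVPP2_RXD_L4_TCP | MVPP2_RXD_L4_UDP))
		return !!(status & MVPP2_RXD_L4_CSUM_OK);

	return true;
}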
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8 packet_offset;	/* the offset from the buffer beginning */
	u8 phys_txq;		/* destination queue ID */
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer */
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use) */
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet */
	u16 reserved1;		/* parser_info (for future use, PnC) */
	u16 data_size;		/* size of received packet in bytes */
	u32 buf_dma_addr;	/* physical address of the buffer */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON) */
	u16 reserved3;		/* csum_l4 (for future use, PnC) */
	u8 reserved4;		/* bm_qset (for future use, BM) */
	u8 reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC) */
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8 packet_offset;
	u8 phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};
struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};
#define IS_TSO_HEADER(txq_pcpu, addr) \
	((addr) >= (txq_pcpu)->tso_headers_dma && \
	 (addr) < (txq_pcpu)->tso_headers_dma + \
	 (txq_pcpu)->size * TSO_HEADER_SIZE)
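
/* Usage sketch (illustration only): in the TX completion path, buffers
 * carved out of the per-CPU TSO header region must not be unmapped
 * individually, since the whole region is one coherent DMA allocation.
 * A hypothetical cleanup step would look like:
 *
 *	if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
 *		dma_unmap_single(dev, tx_buf->dma, tx_buf->size,
 *				 DMA_TO_DEVICE);
 */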
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1

static int queue_mode = MVPP2_QDIST_SINGLE_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}
/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP22_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_TXQ_PENDING_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[cpu] + offset);
}

static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->swth_base[cpu] + offset);
}
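
/* Usage sketch (illustration only): reading the per-CPU "sent" counter
 * of a TX queue goes through the window of the CPU that owns the
 * transmit, e.g.:
 *
 *	u32 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
 *				    MVPP2_TXQ_SENT_REG(txq->id));
 *	int sent = (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
 *		   MVPP2_TRANSMITTED_COUNT_OFFSET;
 */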
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = addr;
		tx_desc->pp21.packet_offset = offset;
	} else {
		u64 val = (u64)addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}
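
/* Worked example (illustration only): with MVPP2_TX_DESC_ALIGN = 31, a
 * buffer at DMA address 0x1234567f is stored as the 32-byte-aligned
 * base 0x12345660 plus packet_offset 0x1f, which the hardware adds back
 * when fetching the data.
 */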
static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}
static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
/* Initialize tcam entry from hw */
static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
				  struct mvpp2_prs_entry *pe, int tid)
{
	int i;

	/* Check the requested index, not pe->index, which is still
	 * uninitialized at this point.
	 */
	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}
/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
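
/* Worked example (illustration only): the port enable bits are stored
 * inverted (0 = port enabled), so mvpp2_prs_tcam_port_map_set(pe,
 * BIT(0) | BIT(1)) writes ~0x03 & 0xff = 0xfc to the enable byte, and
 * mvpp2_prs_tcam_port_map_get() recovers 0x03 by inverting it again.
 */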
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}

/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set vid in tcam sw entry */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
}
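
/* Minimal sketch (illustration only, not part of the driver): how the
 * helpers above combine to install a parser entry matching an ethertype
 * on all ports. The helper name is hypothetical and the TCAM index is
 * assumed to be a free slot picked by the caller.
 */
static inline int mvpp2_example_install_etype(struct mvpp2 *priv, int tid,
					      unsigned short ethertype)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = tid;		/* hypothetical free TCAM slot */
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_match_etype(&pe, 0, ethertype);

	return mvpp2_prs_hw_write(priv, &pe);
}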
1754 /* Set bits in sram sw entry */
1755 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1758 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1761 /* Clear bits in sram sw entry */
1762 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1765 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

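/* Note: the shift field is encoded as sign-magnitude - a dedicated sign
 * bit plus an unsigned byte - so negative shifts (used e.g. to move back
 * to the IPv6 next-header field) are negated before being stored.
 */
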
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				   (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			    (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

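/* Note: MVPP2_PRS_SRAM_UDF_OFFS is not byte-aligned, so the offset value
 * spills into the following SRAM byte; the explicit byte masking above
 * clears and rewrites only those spilled high-order bits. The same
 * pattern is repeated for the op-select field.
 */
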
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		mvpp2_prs_init_from_hw(priv, pe, tid);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

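/* Note: callers sometimes pass the bounds reversed (mvpp2_prs_vlan_add()
 * passes MVPP2_PE_LAST_FREE_TID first); the swap() above normalizes the
 * range, so the helper always returns the lowest free index within it.
 */
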
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to unicast or multicast promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
				      enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							MVPP2_PRS_SRAM_AI_MASK);

			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
			mvpp2_prs_sram_shift_set(&pe, shift,
					MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						MVPP2_ETH_TYPE_LEN + 2 + 3,
						MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, pe, tid);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto free_pe;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mvpp2_prs_match_etype(pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);

	mvpp2_prs_hw_write(priv, pe);
free_pe:
	kfree(pe);

	return ret;
}

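/* Note: the "tid <= tid_aux" check above keeps every single/triple vlan
 * entry at a higher TCAM index than the last double vlan entry, and
 * mvpp2_prs_double_vlan_add() enforces the mirror-image check. Lower
 * indices win the parser lookup, so a double-tagged packet always hits
 * its double vlan entry first.
 */
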
/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}

/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, pe, tid);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
			mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto free_pe;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto free_pe;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);
free_pe:
	kfree(pe);
	return ret;
}

/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

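/* Note: two entries are written per protocol above - the first requires
 * the IPv4 fragment-offset/flags bytes (data bytes 2 and 3) to be zero,
 * while the second clears those byte masks so any fragment matches and
 * is tagged with MVPP2_PRS_RI_IP_FRAG_TRUE.
 */
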
/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Continue - set next lookup to IPv6 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

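/* Note: the -18 shift appears to undo the skip performed at the L2 stage
 * (2-byte ethertype + first 8 IPv6 header bytes + 16-byte source
 * address, see mvpp2_prs_etype_init()), putting the read position back
 * on the next-header field at offset 6 of the IPv6 header.
 */
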
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}

/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* Untagged EDSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* Untagged DSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Untagged EDSA ethertype entry - placeholder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - placeholder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* Untagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Initialize parser entries for VID filtering */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA */
	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

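/* Note: the "IPv4 with options" entry in mvpp2_prs_etype_init() above
 * deliberately reuses the previous pe without a memset - only the IHL
 * tcam byte and the RI word are rewritten, so the shift, L3 offset and
 * next-lookup settings are inherited from the "without options" entry.
 */
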
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
					      MVPP2_PRS_DBL_VLANS_MAX,
					      sizeof(bool), GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

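/* Note: registration order matters here - the double vlan entries are
 * added before the single vlan ones so that, together with the index
 * checks in mvpp2_prs_vlan_add()/mvpp2_prs_double_vlan_add(), doubles
 * end up at lower (higher-priority) TCAM indices.
 */
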
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar to the 6-TCP or 17-UDP cases */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Find tcam entry with matched pair <vid, port> */
static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
				    u16 mask)
{
	unsigned char byte[2], enable[2];
	struct mvpp2_prs_entry pe;
	u16 rvid, rmask;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_VID */
	for (tid = MVPP2_PE_VID_FILT_RANGE_START;
	     tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);

		rvid = ((byte[0] & 0xf) << 8) + byte[1];
		rmask = ((enable[0] & 0xf) << 8) + enable[1];

		if (rvid != vid || rmask != mask)
			continue;

		return tid;
	}

	return -ENOENT;
}

/* Write parser entry for VID filtering */
static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	/* Scan TCAM and see if an entry with this <vid, port> already exists */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (tid < 0) {
		memset(&pe, 0, sizeof(pe));

		/* Go through all entries from first to last in vlan range */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0)
			return tid;

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

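/* Note: each port owns a fixed window of MVPP2_PRS_VLAN_FILT_MAX entries
 * starting at MVPP2_PE_VID_FILT_RANGE_START + port->id *
 * MVPP2_PRS_VLAN_FILT_MAX; once the window is full the add above fails
 * rather than spilling into another port's range.
 */
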
/* Remove parser entry for VID filtering */
static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	/* Scan TCAM and see if an entry with this <vid, port> exists */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);

	/* No such entry */
	if (tid < 0)
		return;

	mvpp2_prs_hw_inv(priv, tid);
	priv->prs_shadow[tid].valid = false;
}

/* Remove all existing VID filters on this port. Invalidate the entries
 * directly by tid; mvpp2_prs_vid_entry_remove() expects a vid, not a
 * tcam index.
 */
static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
		if (priv->prs_shadow[tid].valid) {
			mvpp2_prs_hw_inv(priv, tid);
			priv->prs_shadow[tid].valid = false;
		}
	}
}

/* Remove VID filtering entry for this port */
static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;

	/* Invalidate the guard entry */
	mvpp2_prs_hw_inv(priv, tid);

	priv->prs_shadow[tid].valid = false;
}

/* Add guard entry that drops packets when no VID is matched on this port */
static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}

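/* Note: the guard entry sits at the port's default slot at the end of
 * its VID range, so (with lower indices winning the lookup) it only
 * matches when none of the specific VID entries added by
 * mvpp2_prs_vid_entry_add() hit first.
 */
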
/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		mvpp2_prs_init_from_hw(priv, pe, tid);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}

3800 /* Update parser's mac da entry */
3801 static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da,
3804 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3805 struct mvpp2 *priv = port->priv;
3806 unsigned int pmap, len, ri;
3807 struct mvpp2_prs_entry *pe;
3810 /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3811 pe = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
3812 MVPP2_PRS_UDF_MAC_DEF);
3819 /* Create new TCAM entry */
3820 /* Go through all the entries from first to last */
3821 tid = mvpp2_prs_tcam_first_free(priv,
3822 MVPP2_PE_MAC_RANGE_START,
3823 MVPP2_PE_MAC_RANGE_END);
3827 pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
3830 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3833 /* Mask all ports */
3834 mvpp2_prs_tcam_port_map_set(pe, 0);
3837 /* Update port mask */
3838 mvpp2_prs_tcam_port_set(pe, port->id, add);
3840 /* Invalidate the entry if no ports are left enabled */
3841 pmap = mvpp2_prs_tcam_port_map_get(pe);
3847 mvpp2_prs_hw_inv(priv, pe->index);
3848 priv->prs_shadow[pe->index].valid = false;
3853 /* Continue - set next lookup */
3854 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3856 /* Set match on DA */
3859 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3861 /* Set result info bits */
3862 if (is_broadcast_ether_addr(da)) {
3863 ri = MVPP2_PRS_RI_L2_BCAST;
3864 } else if (is_multicast_ether_addr(da)) {
3865 ri = MVPP2_PRS_RI_L2_MCAST;
3867 ri = MVPP2_PRS_RI_L2_UCAST;
3869 if (ether_addr_equal(da, port->dev->dev_addr))
3870 ri |= MVPP2_PRS_RI_MAC_ME_MASK;
3873 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3874 MVPP2_PRS_RI_MAC_ME_MASK);
3875 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3876 MVPP2_PRS_RI_MAC_ME_MASK);
3878 /* Shift to ethertype */
3879 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3880 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3882 /* Update shadow table and hw entry */
3883 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3884 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3885 mvpp2_prs_hw_write(priv, pe);
3892 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3894 struct mvpp2_port *port = netdev_priv(dev);
3897 /* Remove old parser entry */
3898 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
3902 /* Add new parser entry */
3903 err = mvpp2_prs_mac_da_accept(port, da, true);
3907 /* Set addr in the device */
3908 ether_addr_copy(dev->dev_addr, da);
3913 static void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
3915 struct mvpp2 *priv = port->priv;
3916 struct mvpp2_prs_entry pe;
3920 for (tid = MVPP2_PE_MAC_RANGE_START;
3921 tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
3922 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3924 if (!priv->prs_shadow[tid].valid ||
3925 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3926 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3929 mvpp2_prs_init_from_hw(priv, &pe, tid);
3931 pmap = mvpp2_prs_tcam_port_map_get(&pe);
3933 /* We only want entries active on this port */
3934 if (!test_bit(port->id, &pmap))
3937 /* Read mac addr from entry */
3938 for (index = 0; index < ETH_ALEN; index++)
3939 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3942 /* Special cases: don't remove the broadcast entry or the port's own address */
3945 if (is_broadcast_ether_addr(da) ||
3946 ether_addr_equal(da, port->dev->dev_addr))
3949 /* Remove entry from TCAM */
3950 mvpp2_prs_mac_da_accept(port, da, false);
3954 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3957 case MVPP2_TAG_TYPE_EDSA:
3958 /* Add port to EDSA entries */
3959 mvpp2_prs_dsa_tag_set(priv, port, true,
3960 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3961 mvpp2_prs_dsa_tag_set(priv, port, true,
3962 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3963 /* Remove port from DSA entries */
3964 mvpp2_prs_dsa_tag_set(priv, port, false,
3965 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3966 mvpp2_prs_dsa_tag_set(priv, port, false,
3967 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3970 case MVPP2_TAG_TYPE_DSA:
3971 /* Add port to DSA entries */
3972 mvpp2_prs_dsa_tag_set(priv, port, true,
3973 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3974 mvpp2_prs_dsa_tag_set(priv, port, true,
3975 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3976 /* Remove port from EDSA entries */
3977 mvpp2_prs_dsa_tag_set(priv, port, false,
3978 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3979 mvpp2_prs_dsa_tag_set(priv, port, false,
3980 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3983 case MVPP2_TAG_TYPE_MH:
3984 case MVPP2_TAG_TYPE_NONE:
3985 /* Remove port from EDSA and DSA entries */
3986 mvpp2_prs_dsa_tag_set(priv, port, false,
3987 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3988 mvpp2_prs_dsa_tag_set(priv, port, false,
3989 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3990 mvpp2_prs_dsa_tag_set(priv, port, false,
3991 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3992 mvpp2_prs_dsa_tag_set(priv, port, false,
3993 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
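/* Reject unknown tag types */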
3997 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
4004 /* Set prs flow for the port */
4005 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
4007 struct mvpp2_prs_entry *pe;
4010 pe = mvpp2_prs_flow_find(port->priv, port->id);
4012 /* Such an entry does not exist - create it */
4014 /* Go through all the entries from last to first */
4015 tid = mvpp2_prs_tcam_first_free(port->priv,
4016 MVPP2_PE_LAST_FREE_TID,
4017 MVPP2_PE_FIRST_FREE_TID);
4021 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
4025 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
4029 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
4030 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
4032 /* Update shadow table */
4033 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
4036 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
4037 mvpp2_prs_hw_write(port->priv, pe);
4043 /* Classifier configuration routines */
4045 /* Update classification flow table registers */
4046 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
4047 struct mvpp2_cls_flow_entry *fe)
4049 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
4050 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
4051 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
4052 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
4055 /* Update classification lookup table register */
4056 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
4057 struct mvpp2_cls_lookup_entry *le)
4061 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
4062 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
4063 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
4066 /* Classifier default initialization */
4067 static void mvpp2_cls_init(struct mvpp2 *priv)
4069 struct mvpp2_cls_lookup_entry le;
4070 struct mvpp2_cls_flow_entry fe;
4073 /* Enable classifier */
4074 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4076 /* Clear classifier flow table */
4077 memset(&fe.data, 0, sizeof(fe.data));
4078 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4080 mvpp2_cls_flow_write(priv, &fe);
4083 /* Clear classifier lookup table */
4085 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4088 mvpp2_cls_lookup_write(priv, &le);
4091 mvpp2_cls_lookup_write(priv, &le);
4095 static void mvpp2_cls_port_config(struct mvpp2_port *port)
4097 struct mvpp2_cls_lookup_entry le;
4100 /* Set way for the port */
4101 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
4102 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
4103 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
4105 /* Pick the entry to be accessed in lookup ID decoding table
4106 * according to the way and lkpid.
4108 le.lkpid = port->id;
4112 /* Set initial CPU queue for receiving packets */
4113 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
4114 le.data |= port->first_rxq;
4116 /* Disable classification engines */
4117 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
4119 /* Update lookup ID table entry */
4120 mvpp2_cls_lookup_write(port->priv, &le);
4123 /* Set CPU queue number for oversize packets */
4124 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
4128 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
4129 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
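/* The remaining high bits of the queue number go into the separate
 * per-port software-forwarding register.
 */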
4131 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
4132 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
4134 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
4135 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
4136 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
4139 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
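/* Buffers that fit in a page come from the cheap page-frag allocator;
 * larger (jumbo) buffers fall back to kmalloc.
 */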
4141 if (likely(pool->frag_size <= PAGE_SIZE))
4142 return netdev_alloc_frag(pool->frag_size);
4144 return kmalloc(pool->frag_size, GFP_ATOMIC);
4147 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
4149 if (likely(pool->frag_size <= PAGE_SIZE))
4150 skb_free_frag(data);
4155 /* Buffer Manager configuration routines */
4158 static int mvpp2_bm_pool_create(struct platform_device *pdev,
4160 struct mvpp2_bm_pool *bm_pool, int size)
4164 /* Number of buffer pointers must be a multiple of 16, as per
4165 * hardware constraints
4167 if (!IS_ALIGNED(size, 16))
4170 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
4171 * bytes per buffer pointer
4173 if (priv->hw_version == MVPP21)
4174 bm_pool->size_bytes = 2 * sizeof(u32) * size;
4176 bm_pool->size_bytes = 2 * sizeof(u64) * size;
4178 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
4181 if (!bm_pool->virt_addr)
4184 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
4185 MVPP2_BM_POOL_PTR_ALIGN)) {
4186 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
4187 bm_pool->virt_addr, bm_pool->dma_addr);
4188 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
4189 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
4193 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
4194 lower_32_bits(bm_pool->dma_addr));
4195 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
4197 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4198 val |= MVPP2_BM_START_MASK;
4199 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4201 bm_pool->size = size;
4202 bm_pool->pkt_size = 0;
4203 bm_pool->buf_num = 0;
4208 /* Set pool buffer size */
4209 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
4210 struct mvpp2_bm_pool *bm_pool,
4215 bm_pool->buf_size = buf_size;
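/* The register encodes the size in units of
 * 1 << MVPP2_POOL_BUF_SIZE_OFFSET (32) bytes, so round up.
 */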
4217 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
4218 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
4221 static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
4222 struct mvpp2_bm_pool *bm_pool,
4223 dma_addr_t *dma_addr,
4224 phys_addr_t *phys_addr)
4226 int cpu = get_cpu();
4228 *dma_addr = mvpp2_percpu_read(priv, cpu,
4229 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
4230 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
4232 if (priv->hw_version == MVPP22) {
4234 u32 dma_addr_highbits, phys_addr_highbits;
4236 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
4237 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
4238 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
4239 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
4241 if (sizeof(dma_addr_t) == 8)
4242 *dma_addr |= (u64)dma_addr_highbits << 32;
4244 if (sizeof(phys_addr_t) == 8)
4245 *phys_addr |= (u64)phys_addr_highbits << 32;
4251 /* Free all buffers from the pool */
4252 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
4253 struct mvpp2_bm_pool *bm_pool, int buf_num)
4257 if (buf_num > bm_pool->buf_num) {
4258 WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
4259 bm_pool->id, buf_num);
4260 buf_num = bm_pool->buf_num;
4263 for (i = 0; i < buf_num; i++) {
4264 dma_addr_t buf_dma_addr;
4265 phys_addr_t buf_phys_addr;
4268 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
4269 &buf_dma_addr, &buf_phys_addr);
4271 dma_unmap_single(dev, buf_dma_addr,
4272 bm_pool->buf_size, DMA_FROM_DEVICE);
4274 data = (void *)phys_to_virt(buf_phys_addr);
4278 mvpp2_frag_free(bm_pool, data);
4281 /* Update BM driver with number of buffers removed from pool */
4282 bm_pool->buf_num -= i;
4285 /* Check number of buffers in BM pool */
4286 static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
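/* The total is split between the external pool counter and the
 * internal BPPI counter, so sum both.
 */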
4290 buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
4291 MVPP22_BM_POOL_PTRS_NUM_MASK;
4292 buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
4293 MVPP2_BM_BPPI_PTR_NUM_MASK;
4295 /* HW has one buffer ready which is not reflected in the counters */
4303 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
4305 struct mvpp2_bm_pool *bm_pool)
4310 buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
4311 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
4313 /* Check buffer counters after free */
4314 buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
4316 WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
4317 bm_pool->id, bm_pool->buf_num);
4321 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4322 val |= MVPP2_BM_STOP_MASK;
4323 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4325 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
4331 static int mvpp2_bm_pools_init(struct platform_device *pdev,
4335 struct mvpp2_bm_pool *bm_pool;
4337 /* Create all pools with maximum size */
4338 size = MVPP2_BM_POOL_SIZE_MAX;
4339 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4340 bm_pool = &priv->bm_pools[i];
4342 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
4344 goto err_unroll_pools;
4345 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
4350 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
4351 for (i = i - 1; i >= 0; i--)
4352 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
4356 static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
4360 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4361 /* Mask all BM interrupts */
4362 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
4363 /* Clear BM cause register */
4364 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
4367 /* Allocate and initialize BM pools */
4368 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
4369 sizeof(*priv->bm_pools), GFP_KERNEL);
4370 if (!priv->bm_pools)
4373 err = mvpp2_bm_pools_init(pdev, priv);
4379 static void mvpp2_setup_bm_pool(void)
4382 mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
4383 mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
4386 mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
4387 mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
4390 mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
4391 mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
4394 /* Attach long pool to rxq */
4395 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
4396 int lrxq, int long_pool)
4401 /* Get queue physical ID */
4402 prxq = port->rxqs[lrxq]->id;
4404 if (port->priv->hw_version == MVPP21)
4405 mask = MVPP21_RXQ_POOL_LONG_MASK;
4407 mask = MVPP22_RXQ_POOL_LONG_MASK;
4409 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4411 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
4412 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4415 /* Attach short pool to rxq */
4416 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
4417 int lrxq, int short_pool)
4422 /* Get queue physical ID */
4423 prxq = port->rxqs[lrxq]->id;
4425 if (port->priv->hw_version == MVPP21)
4426 mask = MVPP21_RXQ_POOL_SHORT_MASK;
4428 mask = MVPP22_RXQ_POOL_SHORT_MASK;
4430 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4432 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
4433 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4436 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
4437 struct mvpp2_bm_pool *bm_pool,
4438 dma_addr_t *buf_dma_addr,
4439 phys_addr_t *buf_phys_addr,
4442 dma_addr_t dma_addr;
4445 data = mvpp2_frag_alloc(bm_pool);
4449 dma_addr = dma_map_single(port->dev->dev.parent, data,
4450 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
4452 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
4453 mvpp2_frag_free(bm_pool, data);
4456 *buf_dma_addr = dma_addr;
4457 *buf_phys_addr = virt_to_phys(data);
4462 /* Release buffer to BM */
4463 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
4464 dma_addr_t buf_dma_addr,
4465 phys_addr_t buf_phys_addr)
4467 int cpu = get_cpu();
4469 if (port->priv->hw_version == MVPP22) {
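/* With 64-bit addresses, stage the high bits of both the DMA and
 * physical address in the high-address release register before the
 * low-bit writes below trigger the actual release.
 */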
4472 if (sizeof(dma_addr_t) == 8)
4473 val |= upper_32_bits(buf_dma_addr) &
4474 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
4476 if (sizeof(phys_addr_t) == 8)
4477 val |= (upper_32_bits(buf_phys_addr)
4478 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
4479 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
4481 mvpp2_percpu_write(port->priv, cpu,
4482 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
4485 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
4486 * returned in the "cookie" field of the RX
4487 * descriptor. Instead of storing the virtual address, we
4488 * store the physical address
4490 mvpp2_percpu_write(port->priv, cpu,
4491 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
4492 mvpp2_percpu_write(port->priv, cpu,
4493 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
4498 /* Allocate buffers for the pool */
4499 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
4500 struct mvpp2_bm_pool *bm_pool, int buf_num)
4502 int i, buf_size, total_size;
4503 dma_addr_t dma_addr;
4504 phys_addr_t phys_addr;
4507 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
4508 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4511 (buf_num + bm_pool->buf_num > bm_pool->size)) {
4512 netdev_err(port->dev,
4513 "cannot allocate %d buffers for pool %d\n",
4514 buf_num, bm_pool->id);
4518 for (i = 0; i < buf_num; i++) {
4519 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4520 &phys_addr, GFP_KERNEL);
4524 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
4528 /* Update BM driver with number of buffers added to pool */
4529 bm_pool->buf_num += i;
4531 netdev_dbg(port->dev,
4532 "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4533 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4535 netdev_dbg(port->dev,
4536 "pool %d: %d of %d buffers added\n",
4537 bm_pool->id, i, buf_num);
4541 /* Notify the driver that BM pool is being used as a specific type and return the
4542 * pool pointer on success
4544 static struct mvpp2_bm_pool *
4545 mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
4547 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4550 if (pool >= MVPP2_BM_POOLS_NUM) {
4551 netdev_err(port->dev, "Invalid pool %d\n", pool);
4555 /* Allocate buffers in case BM pool is used as long pool, but packet
4556 * size doesn't match MTU or BM pool hasn't been used yet
4558 if (new_pool->pkt_size == 0) {
4561 /* Set default buffer number or free all the buffers in case
4562 * the pool is not empty
4564 pkts_num = new_pool->buf_num;
4566 pkts_num = mvpp2_pools[pool].buf_num;
4568 mvpp2_bm_bufs_free(port->dev->dev.parent,
4569 port->priv, new_pool, pkts_num);
4571 new_pool->pkt_size = pkt_size;
4572 new_pool->frag_size =
4573 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4574 MVPP2_SKB_SHINFO_SIZE;
4576 /* Allocate buffers for this pool */
4577 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4578 if (num != pkts_num) {
4579 WARN(1, "pool %d: %d of %d allocated\n",
4580 new_pool->id, num, pkts_num);
4585 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4586 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4591 /* Initialize pools for swf (software forwarding) */
4592 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4595 enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
4597 /* If port pkt_size is higher than 1518B:
4598 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
4599 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
4601 if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
4602 long_log_pool = MVPP2_BM_JUMBO;
4603 short_log_pool = MVPP2_BM_LONG;
4605 long_log_pool = MVPP2_BM_LONG;
4606 short_log_pool = MVPP2_BM_SHORT;
4609 if (!port->pool_long) {
4611 mvpp2_bm_pool_use(port, long_log_pool,
4612 mvpp2_pools[long_log_pool].pkt_size);
4613 if (!port->pool_long)
4616 port->pool_long->port_map |= BIT(port->id);
4618 for (rxq = 0; rxq < port->nrxqs; rxq++)
4619 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4622 if (!port->pool_short) {
4624 mvpp2_bm_pool_use(port, short_log_pool,
4625 mvpp2_pools[short_log_pool].pkt_size);
4626 if (!port->pool_short)
4629 port->pool_short->port_map |= BIT(port->id);
4631 for (rxq = 0; rxq < port->nrxqs; rxq++)
4632 mvpp2_rxq_short_pool_set(port, rxq,
4633 port->pool_short->id);
4639 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4641 struct mvpp2_port *port = netdev_priv(dev);
4642 enum mvpp2_bm_pool_log_num new_long_pool;
4643 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4645 /* If port MTU is higher than 1518B:
4646 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
4647 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
4649 if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
4650 new_long_pool = MVPP2_BM_JUMBO;
4652 new_long_pool = MVPP2_BM_LONG;
4654 if (new_long_pool != port->pool_long->id) {
4655 /* Remove port from old short & long pool */
4656 port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
4657 port->pool_long->pkt_size);
4658 port->pool_long->port_map &= ~BIT(port->id);
4659 port->pool_long = NULL;
4661 port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
4662 port->pool_short->pkt_size);
4663 port->pool_short->port_map &= ~BIT(port->id);
4664 port->pool_short = NULL;
4666 port->pkt_size = pkt_size;
4668 /* Add port to new short & long pool */
4669 mvpp2_swf_bm_pool_init(port);
4671 /* Update L4 checksum offload when jumbo is enabled/disabled on the port */
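/* Presumably only port 0 has a TX FIFO large enough to sustain
 * checksum offload with jumbo frames, hence the port->id check below.
 */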
4672 if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
4673 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
4674 dev->hw_features &= ~(NETIF_F_IP_CSUM |
4677 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4678 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4683 dev->wanted_features = dev->features;
4685 netdev_update_features(dev);
4689 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4691 int i, sw_thread_mask = 0;
4693 for (i = 0; i < port->nqvecs; i++)
4694 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4696 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4697 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
4700 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4702 int i, sw_thread_mask = 0;
4704 for (i = 0; i < port->nqvecs; i++)
4705 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4707 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4708 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4711 static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4713 struct mvpp2_port *port = qvec->port;
4715 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4716 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4719 static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4721 struct mvpp2_port *port = qvec->port;
4723 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4724 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
4727 /* Mask the current CPU's Rx/Tx interrupts.
4728 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4729 * using smp_processor_id() is OK.
4731 static void mvpp2_interrupts_mask(void *arg)
4733 struct mvpp2_port *port = arg;
4735 mvpp2_percpu_write(port->priv, smp_processor_id(),
4736 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
4739 /* Unmask the current CPU's Rx/Tx interrupts.
4740 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4741 * using smp_processor_id() is OK.
4743 static void mvpp2_interrupts_unmask(void *arg)
4745 struct mvpp2_port *port = arg;
4748 val = MVPP2_CAUSE_MISC_SUM_MASK |
4749 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4750 if (port->has_tx_irqs)
4751 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4753 mvpp2_percpu_write(port->priv, smp_processor_id(),
4754 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
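/* Mask or unmask the RX interrupts of shared (non per-CPU) queue
 * vectors; only PPv2.2 has such vectors.
 */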
4758 mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4763 if (port->priv->hw_version != MVPP22)
4769 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4771 for (i = 0; i < port->nqvecs; i++) {
4772 struct mvpp2_queue_vector *v = port->qvecs + i;
4774 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4777 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4778 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4782 /* Port configuration routines */
4784 static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
4786 struct mvpp2 *priv = port->priv;
4789 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4790 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
4791 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4793 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4794 if (port->gop_id == 2)
4795 val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
4796 else if (port->gop_id == 3)
4797 val |= GENCONF_CTRL0_PORT1_RGMII_MII;
4798 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4801 static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
4803 struct mvpp2 *priv = port->priv;
4806 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4807 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
4808 GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
4809 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4811 if (port->gop_id > 1) {
4812 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4813 if (port->gop_id == 2)
4814 val &= ~GENCONF_CTRL0_PORT0_RGMII;
4815 else if (port->gop_id == 3)
4816 val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
4817 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4821 static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
4823 struct mvpp2 *priv = port->priv;
4824 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
4825 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
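/* XPCS: reset the PCS mode and select two active lanes */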
4829 val = readl(xpcs + MVPP22_XPCS_CFG0);
4830 val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
4831 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
4832 val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
4833 writel(val, xpcs + MVPP22_XPCS_CFG0);
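/* MPCS: clear FWD_ERR_CONN and reprogram the clock divider and resets */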
4836 val = readl(mpcs + MVPP22_MPCS_CTRL);
4837 val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
4838 writel(val, mpcs + MVPP22_MPCS_CTRL);
4840 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
4841 val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
4842 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
4843 val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
4844 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4846 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
4847 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
4848 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4851 static int mvpp22_gop_init(struct mvpp2_port *port)
4853 struct mvpp2 *priv = port->priv;
4856 if (!priv->sysctrl_base)
4859 switch (port->phy_interface) {
4860 case PHY_INTERFACE_MODE_RGMII:
4861 case PHY_INTERFACE_MODE_RGMII_ID:
4862 case PHY_INTERFACE_MODE_RGMII_RXID:
4863 case PHY_INTERFACE_MODE_RGMII_TXID:
4864 if (port->gop_id == 0)
4866 mvpp22_gop_init_rgmii(port);
4868 case PHY_INTERFACE_MODE_SGMII:
4869 mvpp22_gop_init_sgmii(port);
4871 case PHY_INTERFACE_MODE_10GKR:
4872 if (port->gop_id != 0)
4874 mvpp22_gop_init_10gkr(port);
4877 goto unsupported_conf;
4880 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
4881 val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
4882 GENCONF_PORT_CTRL1_EN(port->gop_id);
4883 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
4885 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4886 val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
4887 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4889 regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
4890 val |= GENCONF_SOFT_RESET1_GOP;
4891 regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
4897 netdev_err(port->dev, "Invalid port configuration\n");
4901 static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
4905 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4906 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4907 /* Enable the GMAC link status irq for this port */
4908 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4909 val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4910 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4913 if (port->gop_id == 0) {
4914 /* Enable the XLG/GIG irqs for this port */
4915 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4916 if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4917 val |= MVPP22_XLG_EXT_INT_MASK_XLG;
4919 val |= MVPP22_XLG_EXT_INT_MASK_GIG;
4920 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4924 static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
4928 if (port->gop_id == 0) {
4929 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4930 val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
4931 MVPP22_XLG_EXT_INT_MASK_GIG);
4932 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4935 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4936 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4937 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4938 val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4939 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4943 static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
4947 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4948 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4949 val = readl(port->base + MVPP22_GMAC_INT_MASK);
4950 val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
4951 writel(val, port->base + MVPP22_GMAC_INT_MASK);
4954 if (port->gop_id == 0) {
4955 val = readl(port->base + MVPP22_XLG_INT_MASK);
4956 val |= MVPP22_XLG_INT_MASK_LINK;
4957 writel(val, port->base + MVPP22_XLG_INT_MASK);
4960 mvpp22_gop_unmask_irq(port);
4963 static int mvpp22_comphy_init(struct mvpp2_port *port)
4971 switch (port->phy_interface) {
4972 case PHY_INTERFACE_MODE_SGMII:
4973 mode = PHY_MODE_SGMII;
4975 case PHY_INTERFACE_MODE_10GKR:
4976 mode = PHY_MODE_10GKR;
4982 ret = phy_set_mode(port->comphy, mode);
4986 return phy_power_on(port->comphy);
4989 static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
4993 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4994 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4995 val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
4996 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4997 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4998 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4999 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
5000 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
5001 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
5002 MVPP22_CTRL4_SYNC_BYPASS_DIS |
5003 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5004 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
5005 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
5008 /* The port is connected to a copper PHY */
5009 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5010 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
5011 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5013 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5014 val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
5015 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
5016 MVPP2_GMAC_AN_DUPLEX_EN;
5017 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5018 val |= MVPP2_GMAC_IN_BAND_AUTONEG;
5019 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5022 static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
5026 /* Force link down */
5027 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5028 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5029 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5030 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5032 /* Set the GMAC in a reset state */
5033 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5034 val |= MVPP2_GMAC_PORT_RESET_MASK;
5035 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5037 /* Configure the PCS and in-band AN */
5038 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5039 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
5040 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
5041 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
5042 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
5044 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5046 mvpp2_port_mii_gmac_configure_mode(port);
5048 /* Unset the GMAC reset state */
5049 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5050 val &= ~MVPP2_GMAC_PORT_RESET_MASK;
5051 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5053 /* Stop forcing link down */
5054 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5055 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
5056 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5059 static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
5063 if (port->gop_id != 0)
5066 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5067 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
5068 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5070 val = readl(port->base + MVPP22_XLG_CTRL4_REG);
5071 val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
5072 val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
5073 writel(val, port->base + MVPP22_XLG_CTRL4_REG);
5076 static void mvpp22_port_mii_set(struct mvpp2_port *port)
5080 /* Only GOP port 0 has an XLG MAC */
5081 if (port->gop_id == 0) {
5082 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
5083 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
5085 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5086 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5087 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
5089 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
5091 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
5095 static void mvpp2_port_mii_set(struct mvpp2_port *port)
5097 if (port->priv->hw_version == MVPP22)
5098 mvpp22_port_mii_set(port);
5100 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
5101 port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5102 mvpp2_port_mii_gmac_configure(port);
5103 else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5104 mvpp2_port_mii_xlg_configure(port);
5107 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
5111 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5112 val |= MVPP2_GMAC_FC_ADV_EN;
5113 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5116 static void mvpp2_port_enable(struct mvpp2_port *port)
5120 /* Only GOP port 0 has an XLG MAC */
5121 if (port->gop_id == 0 &&
5122 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5123 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5124 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5125 val |= MVPP22_XLG_CTRL0_PORT_EN |
5126 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
5127 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
5128 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5130 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5131 val |= MVPP2_GMAC_PORT_EN_MASK;
5132 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
5133 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5137 static void mvpp2_port_disable(struct mvpp2_port *port)
5141 /* Only GOP port 0 has an XLG MAC */
5142 if (port->gop_id == 0 &&
5143 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5144 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5145 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5146 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
5147 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5148 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5150 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5151 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
5152 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5156 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
5157 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
5161 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
5162 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
5163 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5166 /* Configure loopback port */
5167 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
5171 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5173 if (port->speed == 1000)
5174 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
5176 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
5178 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5179 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
5181 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
5183 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5186 struct mvpp2_ethtool_counter {
5187 unsigned int offset;
5188 const char string[ETH_GSTRING_LEN];
5192 static u64 mvpp2_read_count(struct mvpp2_port *port,
5193 const struct mvpp2_ethtool_counter *counter)
5197 val = readl(port->stats_base + counter->offset);
5198 if (counter->reg_is_64b)
5199 val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
5204 /* Due to the fact that software statistics and hardware statistics are, by
5205 * design, incremented at different moments in the chain of packet processing,
5206 * it is very likely that incoming packets could have been dropped after being
5207 * counted by hardware but before reaching software statistics (most probably
5208 * multicast packets), and in the opposite way, during transmission, FCS bytes
5209 * are added in between, and TSO skbs are split with header bytes added.
5210 * Hence, statistics gathered from userspace with ifconfig (software) and
5211 * ethtool (hardware) cannot be compared.
5213 static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
5214 { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
5215 { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
5216 { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
5217 { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
5218 { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
5219 { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
5220 { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
5221 { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
5222 { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
5223 { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
5224 { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
5225 { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
5226 { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
5227 { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
5228 { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
5229 { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
5230 { MVPP2_MIB_FC_SENT, "fc_sent" },
5231 { MVPP2_MIB_FC_RCVD, "fc_received" },
5232 { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
5233 { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
5234 { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
5235 { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
5236 { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
5237 { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
5238 { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
5239 { MVPP2_MIB_COLLISION, "collision" },
5240 { MVPP2_MIB_LATE_COLLISION, "late_collision" },
5243 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
5246 if (sset == ETH_SS_STATS) {
5249 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5250 memcpy(data + i * ETH_GSTRING_LEN,
5251 &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
5255 static void mvpp2_gather_hw_statistics(struct work_struct *work)
5257 struct delayed_work *del_work = to_delayed_work(work);
5258 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
5263 mutex_lock(&port->gather_stats_lock);
5265 pstats = port->ethtool_stats;
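/* The MIB counters clear on read (see mvpp2_port_reset), so each pass
 * accumulates the latest deltas into the running 64-bit totals.
 */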
5266 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5267 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
5269 /* No need to read the counters again right after this function if it
5270 * was called asynchronously by the user (i.e. via ethtool).
5272 cancel_delayed_work(&port->stats_work);
5273 queue_delayed_work(port->priv->stats_queue, &port->stats_work,
5274 MVPP2_MIB_COUNTERS_STATS_DELAY);
5276 mutex_unlock(&port->gather_stats_lock);
5279 static void mvpp2_ethtool_get_stats(struct net_device *dev,
5280 struct ethtool_stats *stats, u64 *data)
5282 struct mvpp2_port *port = netdev_priv(dev);
5284 /* Update statistics for the given port, then take the lock to avoid
5285 * concurrent accesses on the ethtool_stats structure during its copy.
5287 mvpp2_gather_hw_statistics(&port->stats_work.work);
5289 mutex_lock(&port->gather_stats_lock);
5290 memcpy(data, port->ethtool_stats,
5291 sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
5292 mutex_unlock(&port->gather_stats_lock);
5295 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
5297 if (sset == ETH_SS_STATS)
5298 return ARRAY_SIZE(mvpp2_ethtool_regs);
5303 static void mvpp2_port_reset(struct mvpp2_port *port)
5308 /* Read the GOP statistics to reset the hardware counters */
5309 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5310 mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
5312 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5313 ~MVPP2_GMAC_PORT_RESET_MASK;
5314 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
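/* Wait until the reset bit reads back as cleared */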
5316 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5317 MVPP2_GMAC_PORT_RESET_MASK)
5321 /* Change maximum receive size of the port */
5322 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
5326 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5327 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
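/* The limit is programmed in 2-byte units, judging by the division by
 * two of the packet size (minus the Marvell header).
 */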
5328 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
5329 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
5330 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5333 /* Change maximum receive size of the port */
5334 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
5338 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
5339 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
5340 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
5341 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
5342 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
5345 /* Set defaults to the MVPP2 port */
5346 static void mvpp2_defaults_set(struct mvpp2_port *port)
5348 int tx_port_num, val, queue, ptxq, lrxq;
5350 if (port->priv->hw_version == MVPP21) {
5351 /* Configure port to loopback if needed */
5352 if (port->flags & MVPP2_F_LOOPBACK)
5353 mvpp2_port_loopback_set(port);
5355 /* Update TX FIFO MIN Threshold */
5356 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5357 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
5358 /* Min. TX threshold must be less than minimal packet length */
5359 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
5360 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5363 /* Disable Legacy WRR, Disable EJP, Release from reset */
5364 tx_port_num = mvpp2_egress_port(port);
5365 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
5367 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
5369 /* Close bandwidth for all queues */
5370 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
5371 ptxq = mvpp2_txq_phys(port->id, queue);
5372 mvpp2_write(port->priv,
5373 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
5376 /* Set refill period to 1 usec, refill tokens
5377 * and bucket size to maximum
5379 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
5380 port->priv->tclk / USEC_PER_SEC);
5381 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
5382 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
5383 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
5384 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
5385 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
5386 val = MVPP2_TXP_TOKEN_SIZE_MAX;
5387 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5389 /* Set MaximumLowLatencyPacketSize value to 256 */
5390 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
5391 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
5392 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
5394 /* Enable Rx cache snoop */
5395 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
5396 queue = port->rxqs[lrxq]->id;
5397 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5398 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
5399 MVPP2_SNOOP_BUF_HDR_MASK;
5400 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5403 /* By default, mask all interrupts on all present CPUs */
5404 mvpp2_interrupts_disable(port);
5407 /* Enable/disable receiving packets */
5408 static void mvpp2_ingress_enable(struct mvpp2_port *port)
5413 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
5414 queue = port->rxqs[lrxq]->id;
5415 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5416 val &= ~MVPP2_RXQ_DISABLE_MASK;
5417 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5421 static void mvpp2_ingress_disable(struct mvpp2_port *port)
5426 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
5427 queue = port->rxqs[lrxq]->id;
5428 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5429 val |= MVPP2_RXQ_DISABLE_MASK;
5430 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5434 /* Enable transmit via physical egress queue
5435 * - HW starts taking descriptors from DRAM
5437 static void mvpp2_egress_enable(struct mvpp2_port *port)
5441 int tx_port_num = mvpp2_egress_port(port);
5443 /* Enable all initialized TXs. */
5445 for (queue = 0; queue < port->ntxqs; queue++) {
5446 struct mvpp2_tx_queue *txq = port->txqs[queue];
5449 qmap |= (1 << queue);
5452 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5453 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
5456 /* Disable transmit via physical egress queue
5457 * - HW doesn't take descriptors from DRAM
5459 static void mvpp2_egress_disable(struct mvpp2_port *port)
5463 int tx_port_num = mvpp2_egress_port(port);
5465 /* Issue stop command for active channels only */
5466 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5467 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
5468 MVPP2_TXP_SCHED_ENQ_MASK;
5470 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
5471 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
5473 /* Wait for all Tx activity to terminate. */
5476 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
5477 netdev_warn(port->dev,
5478 "Tx stop timed out, status=0x%08x\n",
5485 /* Check port TX Command register that all
5486 * Tx queues are stopped
5488 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
5489 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
5492 /* Rx descriptors helper methods */
5494 /* Get number of Rx descriptors occupied by received packets */
5496 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
5498 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
5500 return val & MVPP2_RXQ_OCCUPIED_MASK;
5503 /* Update Rx queue status with the number of occupied and available
5504 * Rx descriptor slots.
5507 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
5508 int used_count, int free_count)
5510 /* Decrement the number of used descriptors and increment the
5511 * number of free descriptors.
5513 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
5515 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
5518 /* Get pointer to next RX descriptor to be processed by SW */
5519 static inline struct mvpp2_rx_desc *
5520 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
5522 int rx_desc = rxq->next_desc_to_proc;
5524 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
5525 prefetch(rxq->descs + rxq->next_desc_to_proc);
5526 return rxq->descs + rx_desc;
5529 /* Set rx queue offset */
5530 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
5531 int prxq, int offset)
5535 /* Convert offset from bytes to units of 32 bytes */
5536 offset = offset >> 5;
5538 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
5539 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
5542 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
5543 MVPP2_RXQ_PACKET_OFFSET_MASK);
5545 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
5548 /* Tx descriptors helper methods */
5550 /* Get pointer to next Tx descriptor to be processed (send) by HW */
5551 static struct mvpp2_tx_desc *
5552 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
5554 int tx_desc = txq->next_desc_to_proc;
5556 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
5557 return txq->descs + tx_desc;
5560 /* Update HW with number of aggregated Tx descriptors to be sent
5562 * Called only from mvpp2_tx(), so migration is disabled, using
5563 * smp_processor_id() is OK.
5565 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
5567 /* aggregated access - relevant TXQ number is written in TX desc */
5568 mvpp2_percpu_write(port->priv, smp_processor_id(),
5569 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
5573 /* Check if there are enough free descriptors in aggregated txq.
5574 * If not, update the number of occupied descriptors and repeat the check.
5576 * Called only from mvpp2_tx(), so migration is disabled, using
5577 * smp_processor_id() is OK.
5579 static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
5580 struct mvpp2_tx_queue *aggr_txq, int num)
5582 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
5583 /* Update number of occupied aggregated Tx descriptors */
5584 int cpu = smp_processor_id();
5585 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
5587 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
5590 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
5596 /* Reserved Tx descriptors allocation request
5598 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
5599 * only by mvpp2_tx(), so migration is disabled, using
5600 * smp_processor_id() is OK.
5602 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
5603 struct mvpp2_tx_queue *txq, int num)
5606 int cpu = smp_processor_id();
5608 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
5609 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
5611 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
5613 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
5616 /* Check if there are enough reserved descriptors for transmission.
5617 * If not, request chunk of reserved descriptors and check again.
5619 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
5620 struct mvpp2_tx_queue *txq,
5621 struct mvpp2_txq_pcpu *txq_pcpu,
5624 int req, cpu, desc_count;
5626 if (txq_pcpu->reserved_num >= num)
5629 /* Not enough descriptors reserved! Update the reserved descriptor
5630 * count and check again.
5634 /* Compute total of used descriptors */
5635 for_each_present_cpu(cpu) {
5636 struct mvpp2_txq_pcpu *txq_pcpu_aux;
5638 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
5639 desc_count += txq_pcpu_aux->count;
5640 desc_count += txq_pcpu_aux->reserved_num;
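/* Reserve at least a full chunk so that the reservation round-trip is
 * amortized over several packets.
 */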
5643 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
5647 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
5650 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
5652 /* OK, the descriptor count has been updated: check again. */
5653 if (txq_pcpu->reserved_num < num)
5658 /* Release the last allocated Tx descriptor. Useful to handle DMA
5659 * mapping failures in the Tx path.
5661 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
5663 if (txq->next_desc_to_proc == 0)
5664 txq->next_desc_to_proc = txq->last_desc - 1;
5666 txq->next_desc_to_proc--;
5669 /* Set Tx descriptors fields relevant for CSUM calculation */
5670 static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
5671 int ip_hdr_len, int l4_proto)
5675 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
5676 * G_L4_chk, L4_type required only for checksum calculation
5678 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
5679 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
5680 command |= MVPP2_TXD_IP_CSUM_DISABLE;
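/* l3_proto is in network byte order (e.g. skb->protocol), which on the
 * little-endian hosts this driver runs on matches the swab16() values.
 */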
5682 if (l3_proto == swab16(ETH_P_IP)) {
5683 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
5684 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
5686 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
5689 if (l4_proto == IPPROTO_TCP) {
5690 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
5691 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5692 } else if (l4_proto == IPPROTO_UDP) {
5693 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
5694 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5696 command |= MVPP2_TXD_L4_CSUM_NOT;
5702 /* Get number of sent descriptors and decrement counter.
5703 * The number of sent descriptors is returned.
5706 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
5707 * (migration disabled) and from the TX completion tasklet (migration
5708 * disabled) so using smp_processor_id() is OK.
5710 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
5711 struct mvpp2_tx_queue *txq)
5715 /* Reading status reg resets transmitted descriptor counter */
5716 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
5717 MVPP2_TXQ_SENT_REG(txq->id));
5719 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
5720 MVPP2_TRANSMITTED_COUNT_OFFSET;
5723 /* Called through on_each_cpu(), so runs on all CPUs, with migration
5724 * disabled, therefore using smp_processor_id() is OK.
5726 static void mvpp2_txq_sent_counter_clear(void *arg)
5728 struct mvpp2_port *port = arg;
5731 for (queue = 0; queue < port->ntxqs; queue++) {
5732 int id = port->txqs[queue]->id;
5734 mvpp2_percpu_read(port->priv, smp_processor_id(),
5735 MVPP2_TXQ_SENT_REG(id));
5739 /* Set max sizes for Tx queues */
5740 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
5743 int txq, tx_port_num;
5745 mtu = port->pkt_size * 8;
5746 if (mtu > MVPP2_TXP_MTU_MAX)
5747 mtu = MVPP2_TXP_MTU_MAX;
5749 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
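mtu = 3 * mtu;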
5752 /* Indirect access to registers */
5753 tx_port_num = mvpp2_egress_port(port);
5754 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5757 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
5758 val &= ~MVPP2_TXP_MTU_MAX;
5760 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
5762 /* TXP token size and all TXQs token size must be larger than MTU */
5763 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
5764 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
5767 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
5769 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5772 for (txq = 0; txq < port->ntxqs; txq++) {
5773 val = mvpp2_read(port->priv,
5774 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
5775 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
5779 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
5781 mvpp2_write(port->priv,
5782 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
5788 /* Set the number of packets that will be received before Rx interrupt
5789 * will be generated by HW.
5791 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
5792 struct mvpp2_rx_queue *rxq)
5794 int cpu = get_cpu();
5796 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
5797 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
5799 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5800 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
5806 /* For some reason in the LSP this is done on each CPU. Why? */
5807 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
5808 struct mvpp2_tx_queue *txq)
5810 int cpu = get_cpu();
5813 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
5814 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
5816 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
5817 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5818 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
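/* Coalescing delays are programmed in tclk cycles:
 * cycles = usec * clk_hz / USEC_PER_SEC, clamped to the 32-bit
 * register range (inverse conversion below).
 */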
5823 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
5825 u64 tmp = (u64)clk_hz * usec;
5827 do_div(tmp, USEC_PER_SEC);
5829 return tmp > U32_MAX ? U32_MAX : tmp;
5832 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
5834 u64 tmp = (u64)cycles * USEC_PER_SEC;
5836 do_div(tmp, clk_hz);
5838 return tmp > U32_MAX ? U32_MAX : tmp;
5841 /* Set the time delay in usec before Rx interrupt */
5842 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
5843 struct mvpp2_rx_queue *rxq)
5845 unsigned long freq = port->priv->tclk;
5846 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5848 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
5850 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
5852 /* re-evaluate to get actual register value */
5853 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5856 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
5859 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
5861 unsigned long freq = port->priv->tclk;
5862 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5864 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
5865 port->tx_time_coal =
5866 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
5868 /* re-evaluate to get actual register value */
5869 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5872 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
					 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
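/* Example of the fls()-based decoding above: a cause bitmap of 0b0110
 * (queues 1 and 2 pending) yields fls(cause) - 1 = 2, so the highest
 * pending queue is returned first; the caller is expected to clear that
 * queue's bit and call again until the bitmap is empty.
 */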
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
			netif_tx_wake_queue(nq);
}
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  int cpu)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;
	int cpu;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	int cpu;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_dma);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma         = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();
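	/* Worked example for the prefetch layout above: with desc_per_txq = 16
	 * and MVPP2_MAX_TXQ = 8, port 1 / logical queue 2 gets base
	 * 1 * 8 * 16 + 2 * 16 = 160, so each (port, txq) pair owns a disjoint
	 * 16-descriptor window in the prefetch buffer.
	 */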
	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
		txq_pcpu->tso_headers = NULL;

		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   txq_pcpu->size * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			return -ENOMEM;
	}

	return 0;
}
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);

		if (txq_pcpu->tso_headers)
			dma_free_coherent(port->dev->dev.parent,
					  txq_pcpu->size * TSO_HEADER_SIZE,
					  txq_pcpu->tso_headers,
					  txq_pcpu->tso_headers_dma);

		txq_pcpu->tso_headers = NULL;
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma         = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}
/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}
/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < port->nrxqs; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}
/* Init all Tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_queue_vector *qv = dev_id;

	mvpp2_qvec_interrupt_disable(qv);

	napi_schedule(&qv->napi);

	return IRQ_HANDLED;
}
/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	struct net_device *dev = port->dev;
	bool event = false, link = false;
	u32 val;

	mvpp22_gop_mask_irq(port);

	if (port->gop_id == 0 &&
	    port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP22_XLG_INT_STAT);
		if (val & MVPP22_XLG_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP22_XLG_STATUS);
			if (val & MVPP22_XLG_STATUS_LINK_UP)
				link = true;
		}
	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
		   port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_STAT);
		if (val & MVPP22_GMAC_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP2_GMAC_STATUS0);
			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
				link = true;
		}
	}

	if (!netif_running(dev) || !event)
		goto handled;

	if (link) {
		mvpp2_interrupts_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);

		mvpp2_interrupts_disable(port);
	}

handled:
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}
static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
				   struct phy_device *phydev)
{
	u32 val;

	if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
	    port->phy_interface != PHY_INTERFACE_MODE_SGMII)
		return;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
		 MVPP2_GMAC_CONFIG_GMII_SPEED |
		 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		 MVPP2_GMAC_AN_SPEED_EN |
		 MVPP2_GMAC_AN_DUPLEX_EN);

	if (phydev->duplex)
		val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

	if (phydev->speed == SPEED_1000)
		val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
	else if (phydev->speed == SPEED_100)
		val |= MVPP2_GMAC_CONFIG_MII_SPEED;

	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
/* Adjust link */
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool link_reconfigured = false;
	u32 val;

	if (phydev->link) {
		if (port->phy_interface != phydev->interface && port->comphy) {
			/* disable current port for reconfiguration */
			mvpp2_interrupts_disable(port);
			netif_carrier_off(port->dev);
			mvpp2_port_disable(port);
			phy_power_off(port->comphy);

			/* comphy reconfiguration */
			port->phy_interface = phydev->interface;
			mvpp22_comphy_init(port);

			/* gop/mac reconfiguration */
			mvpp22_gop_init(port);
			mvpp2_port_mii_set(port);

			link_reconfigured = true;
		}

		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			mvpp2_gmac_set_autoneg(port, phydev);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link || link_reconfigured) {
		port->link = phydev->link;

		if (phydev->link) {
			if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
				val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
				val |= (MVPP2_GMAC_FORCE_LINK_PASS |
					MVPP2_GMAC_FORCE_LINK_DOWN);
				writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			}

			mvpp2_interrupts_enable(port);
			mvpp2_port_enable(port);

			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
			netif_carrier_on(dev);
			netif_tx_wake_all_queues(dev);
		} else {
			port->duplex = -1;
			port->speed = 0;

			netif_tx_stop_all_queues(dev);
			netif_carrier_off(dev);
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);

			mvpp2_port_disable(port);
			mvpp2_interrupts_disable(port);
		}

		phy_print_status(phydev);
	}
}
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}
static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}
static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}
/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

	return 0;
}
/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
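/* Note on the units above: ip4h->ihl counts 32-bit words, so a standard
 * 20-byte IPv4 header is reported as ihl = 5; the IPv6 branch likewise
 * converts skb_network_header_len() from bytes to words with the >> 2.
 * mvpp2_txq_desc_csum() then encodes these fields into the descriptor
 * command word consumed by the hardware checksum engine.
 */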
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_done;
}
static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}
static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}
static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
				      tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
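/* Rough descriptor accounting for the TSO path above (illustrative numbers):
 * a linear 9000-byte TCP payload with gso_size = 1448 is cut into
 * ceil(9000 / 1448) = 7 segments, each needing one header descriptor plus at
 * least one data descriptor, i.e. at least 14 descriptors in total;
 * tso_count_descs() computes the exact bound that is checked before the
 * segmentation loop starts.
 */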
/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	int cpu = smp_processor_id();

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
					MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
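	/* Example decode (illustrative value): cause_rx_tx = 0x00030005 means
	 * Rx queues 0 and 2 plus Tx queues 0 and 1 have work pending, per the
	 * bit layout described above.
	 */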
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_percpu_write(port->priv, cpu,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	if (cause_tx) {
		cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
		mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;
	int i;

	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version == MVPP22) {
		mvpp22_comphy_init(port);
		mvpp22_gop_init(port);
	}

	mvpp2_port_mii_set(port);
	mvpp2_port_enable(port);
	if (ndev->phydev)
		phy_start(ndev->phydev);
	netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;
	int i;

	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	if (ndev->phydev)
		phy_stop(ndev->phydev);
	phy_power_off(port->comphy);
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
		new_rx_pending = MVPP2_MAX_RXD_MAX;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
		new_tx_pending = MVPP2_MAX_TXD_MAX;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	/* The Tx ring size cannot be smaller than the minimum number of
	 * descriptors needed for TSO.
	 */
	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
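/* Example of the rounding above: a request for 1000 Rx descriptors is not
 * 16-aligned and is rounded up to 1008, while a small Tx request is first
 * rounded to a multiple of 32 and then, if still below MVPP2_MAX_SKB_DESCS,
 * raised to the TSO minimum; the user is informed via netdev_info().
 */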
static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
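/* Illustrative mapping (made-up address): for 00:50:43:02:02:02 the high
 * register would hold 0x00504302, the low byte of the middle register 0x02,
 * and the GMAC low-register field 0x02, matching the byte-by-byte
 * extraction above.
 */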
static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	/* No PHY is attached */
	if (!port->phy_node)
		return 0;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->link    = 0;
	port->duplex  = 0;
	port->speed   = 0;

	return 0;
}
static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	if (!ndev->phydev)
		return;

	phy_disconnect(ndev->phydev);
}
static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_affinity_hint(qv->irq,
					      cpumask_of(qv->sw_thread_id));
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		free_irq(qv->irq, qv);
	}

	return err;
}
static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}
static void mvpp22_init_rss(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int i;

	/* Set the table width: replace the whole classifier Rx queue number
	 * with the ones configured in RSS table entries.
	 */
	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
	mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);

	/* Loop through the classifier Rx Queues and map them to a RSS table.
	 * Map them all to the first table (0) by default.
	 */
	for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
		mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
		mvpp2_write(priv, MVPP22_RSS_TABLE,
			    MVPP22_RSS_TABLE_POINTER(0));
	}

	/* Configure the first table to evenly distribute the packets across
	 * real Rx Queues. The table entries map a hash to a port Rx Queue.
	 */
	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
		u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
			  MVPP22_RSS_INDEX_TABLE_ENTRY(i);
		mvpp2_write(priv, MVPP22_RSS_INDEX, sel);

		mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
	}
}
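/* Distribution example: with port->nrxqs = 4 the table entries map to queues
 * 0,1,2,3,0,1,2,3,... so each Rx queue receives roughly an equal share of
 * the hash space; the modulo above works for any table size and queue count.
 */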
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev, "cannot request link IRQ %d\n",
				   port->link_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);
	}

	/* In default link is down */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_link_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	if (priv->hw_version == MVPP22)
		mvpp22_init_rss(port);

	/* Start hardware statistics gathering */
	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_link_irq:
	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
		free_irq(port->link_irq, port);
err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	struct mvpp2 *priv = port->priv;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
		free_irq(port->link_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
			tasklet_kill(&port_pcpu->tx_done_tasklet);
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	cancel_delayed_work_sync(&port->stats_work);

	return 0;
}
static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
					struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int ret;

	netdev_hw_addr_list_for_each(ha, list) {
		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
		if (ret)
			return ret;
	}

	return 0;
}
static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
{
	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		mvpp2_prs_vid_enable_filtering(port);
	else
		mvpp2_prs_vid_disable_filtering(port);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_UNI_CAST, enable);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_MULTI_CAST, enable);
}
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Clear the whole UC and MC list */
	mvpp2_prs_mac_del_all(port);

	if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}

	mvpp2_set_rx_promisc(port, false);

	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_UNI_CAST, true);

	if (dev->flags & IFF_ALLMULTI) {
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}

	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
}
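/* The fallback logic above is deliberately conservative: if the unicast or
 * multicast list overflows the parser filter capacity (or programming an
 * entry fails), the port falls back to the corresponding promiscuous mode
 * rather than silently dropping addresses.
 */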
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto log_error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto log_error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto log_error;
out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;
log_error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto log_error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto log_error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

log_error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors  = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}
static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	ret = mvpp2_prs_vid_entry_add(port, vid);
	if (ret)
		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
			   MVPP2_PRS_VLAN_FILT_MAX - 1);
	return ret;
}
static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);

	mvpp2_prs_vid_entry_remove(port, vid);

	return 0;
}
static int mvpp2_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct mvpp2_port *port = netdev_priv(dev);

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			mvpp2_prs_vid_enable_filtering(port);
		} else {
			/* Invalidate all registered VID filters for this
			 * port
			 */
			mvpp2_prs_vid_remove_all(port);

			mvpp2_prs_vid_disable_filtering(port);
		}
	}

	return 0;
}
/* Ethtool methods */

/* Set interrupt coalescing for ethtool */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}
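/* These callbacks back the standard ethtool coalescing knobs, e.g.:
 *
 *   ethtool -C eth0 rx-usecs 32 rx-frames 64 tx-frames 16
 *
 * rx-usecs/rx-frames program every Rx queue identically; tx-frames is only
 * pushed to hardware here when the port has Tx interrupts, otherwise it is
 * consumed by the software tx-done path.
 */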
/* Get coalescing for ethtool */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	c->tx_coalesce_usecs       = port->tx_time_coal;
	return 0;
}
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}
/* Device ops */

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
	.ndo_set_features	= mvpp2_set_features,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_strings		= mvpp2_ethtool_get_strings,
	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
	.get_sset_count		= mvpp2_ethtool_get_sset_count,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}
static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2_queue_vector *v;
	int i, ret;

	port->nqvecs = num_possible_cpus();
	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
		port->nqvecs += 1;

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
			v->nrxqs = MVPP2_DEFAULT_RXQ;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;
			strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		if (port_node)
			v->irq = of_irq_get_byname(port_node, irqname);
		else
			v->irq = fwnode_irq_get(port->fwnode, i);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}
static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}
static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
}
/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
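/* Example for the PPv2.2 path (illustrative configuration): in multi-queue
 * mode with MVPP2_DEFAULT_RXQ = 4, queue vector i owns Rx queues
 * [4 * i, 4 * i + 3] and is steered to software thread i, so each CPU's
 * interrupt only signals its own group of queues.
 */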
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
	    (port->ntxqs > MVPP2_MAX_TXQ))
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
/* Checks if the port DT description has the TX interrupts
 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
 * they are available, but we need to keep support for old DTs.
 */
static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
				   struct device_node *port_node)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
			  "tx-cpu2", "tx-cpu3" };
	int ret, i;

	if (priv->hw_version == MVPP21)
		return false;

	for (i = 0; i < 5; i++) {
		ret = of_property_match_string(port_node, "interrupt-names",
					       irqs[i]);
		if (ret < 0)
			return false;
	}

	return true;
}
static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				     struct fwnode_handle *fwnode,
				     char **mac_from)
{
	struct mvpp2_port *port = netdev_priv(dev);
	char hw_mac_addr[ETH_ALEN] = {0};
	char fw_mac_addr[ETH_ALEN];

	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
		*mac_from = "firmware node";
		ether_addr_copy(dev->dev_addr, fw_mac_addr);
		return;
	}

	if (priv->hw_version == MVPP21) {
		mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			*mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
			return;
		}
	}

	*mac_from = "random";
	eth_hw_addr_random(dev);
}
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{
	struct device_node *phy_node;
	struct phy *comphy = NULL;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct device_node *port_node = to_of_node(port_fwnode);
	struct net_device *dev;
	struct resource *res;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs;
	bool has_tx_irqs;
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;
	if (port_node) {
		has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
	} else {
		has_tx_irqs = true;
		queue_mode = MVPP2_QDIST_MULTI_MODE;
	}

	if (!has_tx_irqs)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
	else
		nrxqs = MVPP2_DEFAULT_RXQ;
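	/* Example (assuming MVPP2_DEFAULT_RXQ is 4): a PPv2.2 system with
	 * four possible CPUs gets 16 RX queues per port in multi-queue mode,
	 * but only 4 when a single shared interrupt serves all queues.
	 */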
	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	phy_mode = fwnode_get_phy_mode(port_fwnode);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (port_node) {
		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
		if (IS_ERR(comphy)) {
			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_free_netdev;
			}
			comphy = NULL;
		}
	}

	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}
	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->fwnode = port_fwnode;
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;
	if (port_node)
		port->link_irq = of_irq_get_byname(port_node, "link");
	else
		port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
	if (port->link_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->link_irq <= 0)
		/* the link irq is optional */
		port->link_irq = 0;

	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->phy_node = phy_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;
	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
	}
	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   ARRAY_SIZE(mvpp2_ethtool_regs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}
	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}
	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	}

	dev->vlan_features |= features;
	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20 and rounding to 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
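	/* i.e. 9728 - 20 = 9708, rounded down to a multiple of 8 -> 9704 */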
	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 8)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	/* The FIFO size parameters are set depending on the maximum speed a
	 * given port can handle:
	 * - Port 0: 10Gbps
	 * - Port 1: 2.5Gbps
	 * - Ports 2 and 3: 1Gbps
	 */

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (port = 2; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size
 * to 3kB.
 */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int port, size, thrs;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (port == 0) {
			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
		} else {
			size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
		mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
	}
}
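/* Sanity check on the sizing above: with MVPP2_MAX_PORTS == 4 this assigns
 * 10kB + 3 * 3kB = 19kB, which matches the total PPv2.2 Tx FIFO budget.
 */
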
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptor */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		if (!acpi_id)
			return -EINVAL;
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * the ACPI, it can already appear as 'in-use'
			 * in the OS. Because it is overlapped by the
			 * second region of the network controller, make
			 * sure it is released before requesting it again.
			 * The mvpp2 driver takes care to avoid concurrent
			 * access to this memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}
	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}
	mvpp2_setup_bm_pool();

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;
	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}
	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
		if (err)
			goto err_mg_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_mg_clk;
	}
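	/* Net effect of the two masks above: streaming DMA (descriptors and
	 * packet buffers) may carry 40-bit addresses, while the coherent
	 * allocations backing the BM pools stay below 4GB, so the single
	 * shared high-bits register is sufficient.
	 */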
	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_mg_clk;
	}

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_mg_clk;
	}
	/* Statistics must be gathered regularly because some of them (like
	 * packet counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
	 */
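	/* Back-of-the-envelope check (illustrative arithmetic, not from the
	 * datasheet): at 10Gbps the octet counter advances by ~1.25e9 per
	 * second, so a 32-bit register wraps after 2^32 / 1.25e9 ~= 3.4s;
	 * even the packet counter at 64B line rate (~14.88Mpps) wraps within
	 * a few minutes.
	 */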
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}
	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_mg_clk:
	clk_disable_unprepare(priv->axi_clk);
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	int i = 0;

	flush_workqueue(priv->stats_queue);
	destroy_workqueue(priv->stats_queue);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");