/* Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET 5
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
#define MVPP2_PRS_PORT_LU_MASK(port) (0xf << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG 0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG 0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
#define MVPP2_CLS_PORT_WAY_REG 0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG 0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
#define MVPP2_RXQ_NUM_NEW_OFFSET 16
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
#define MVPP2_RXQ_THRESH_REG 0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET 0
#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
#define MVPP2_RXQ_INDEX_REG 0x2050
#define MVPP2_TXQ_NUM_REG 0x2080
#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
#define MVPP2_TXQ_THRESH_REG 0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
#define MVPP2_TXQ_PENDING_REG 0x20a0
#define MVPP2_TXQ_PENDING_MASK 0x3fff
#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
#define MVPP2_BM_STOP_MASK BIT(1)
#define MVPP2_BM_STATE_MASK BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS 8
#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS 16
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
#define MVPP2_BM_MC_RLS_REG 0x64c4
#define MVPP2_BM_MC_ID_MASK 0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG 0x8800
#define MVPP2_TX_PORT_FLUSH_REG 0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE 0x24
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT 6
#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS 7
#define MVPP2_GMAC_CTRL_2_REG 0x8
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
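/* Illustrative example (editor's note, not part of the driver): with a
 * four-descriptor ring, q->last_desc is 3 and repeated application of the
 * macro produces the index sequence 0, 1, 2, 3, 0, ... - the index wraps
 * back to the start of the ring instead of running past its end:
 *
 *	next = MVPP2_QUEUE_NEXT_DESC(q, 2);	evaluates to 3
 *	next = MVPP2_QUEUE_NEXT_DESC(q, 3);	evaluates to 0
 */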
/* Various constants */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * way to the RX descriptors.
 */
#define MVPP2_MH_SIZE 2
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4
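/* Worked example (editor's note): the alignment property described above
 * falls out of the header sizes. An untagged frame starts with the 2-byte
 * Marvell header followed by the 14-byte Ethernet header, so the IP header
 * begins at offset MVPP2_MH_SIZE + ETH_HLEN = 2 + 14 = 16, which is 4-byte
 * aligned without any extra adjustment by software.
 */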
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE 9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000

#define MVPP2_TX_MTU_MAX 0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT 16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ 8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4

/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD 1024

/* Number of Tx descriptors that can be reserved at once by a CPU */
#define MVPP2_CPU_DESC_CHUNK 64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE 256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
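/* Worked example (editor's note), assuming a 64-byte cache line:
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536.
 * MVPP2_RX_BUF_SIZE() then adds NET_SKB_PAD and MVPP2_RX_TOTAL_SIZE()
 * adds room for struct skb_shared_info, so MVPP2_RX_MAX_PKT_SIZE() is
 * the exact inverse: it recovers the usable packet size from a total
 * buffer allocation.
 */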
#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16

/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
#define MVPP2_PRS_SRAM_WORDS 4
#define MVPP2_PRS_FLOW_ID_SIZE 64
#define MVPP2_PRS_FLOW_ID_MASK 0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
#define MVPP2_PRS_IPV4_HEAD 0x40
#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
#define MVPP2_PRS_IPV4_IHL 0x5
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
#define MVPP2_PRS_IPV6_HOP_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
#define MVPP2_PRS_DBL_VLANS_MAX 100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS 8
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
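/* Illustrative mapping (editor's note): each 32-bit TCAM word interleaves
 * two data bytes with their two enable bytes, which is what the swizzle
 * above implements. Evaluating the macros for the first few offsets:
 *
 *	MVPP2_PRS_TCAM_DATA_BYTE(0) = 0, MVPP2_PRS_TCAM_DATA_BYTE_EN(0) = 2
 *	MVPP2_PRS_TCAM_DATA_BYTE(1) = 1, MVPP2_PRS_TCAM_DATA_BYTE_EN(1) = 3
 *	MVPP2_PRS_TCAM_DATA_BYTE(2) = 4, MVPP2_PRS_TCAM_DATA_BYTE_EN(2) = 6
 */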
#define MVPP2_PRS_TCAM_AI_BYTE 16
#define MVPP2_PRS_TCAM_PORT_BYTE 17
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS 0
#define MVPP2_PRS_SRAM_RI_WORD 0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
#define MVPP2_PRS_SRAM_AI_OFFS 90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
#define MVPP2_PRS_SRAM_AI_MASK 0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE 0x0
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST 0x0
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN 0x0
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST 0x0
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
#define MVPP2_PRS_RI_L4_TCP BIT(22)
#define MVPP2_PRS_RI_L4_UDP BIT(23)
#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED true
#define MVPP2_PRS_UNTAGGED false
#define MVPP2_PRS_EDSA true
#define MVPP2_PRS_DSA false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64

/* BM constants */
#define MVPP2_BM_POOLS_NUM 8
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL 3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24
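/* Illustrative sketch (editor's note): the cookie stored alongside each
 * buffer encodes the issuing CPU and the originating pool at the offsets
 * above. A hypothetical helper (not part of this section) would compose it
 * as:
 *
 *	u32 cookie = (cpu << MVPP2_BM_COOKIE_CPU_OFFS) |
 *		     (pool << MVPP2_BM_COOKIE_POOL_OFFS);
 */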
/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
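/* Worked check (editor's note): expanding the macro gives
 * MVPP2_RX_MAX_PKT_SIZE(512) = 512 - NET_SKB_PAD -
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), so once the SKB padding
 * and the shared info are added back by MVPP2_RX_BUF_SIZE() and
 * MVPP2_RX_TOTAL_SIZE(), each short-pool buffer occupies exactly 512
 * bytes, as the comment above promises.
 */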
enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_port {
	u8 id;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors; their
 * layout is therefore defined by the hardware design.
 */
#define MVPP2_TXD_L3_OFF_SHIFT 0
#define MVPP2_TXD_IP_HLEN_SHIFT 8
#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
#define MVPP2_TXD_PADDING_DISABLE BIT(23)
#define MVPP2_TXD_L4_UDP BIT(24)
#define MVPP2_TXD_L3_IP6 BIT(26)
#define MVPP2_TXD_L_DESC BIT(28)
#define MVPP2_TXD_F_DESC BIT(29)

#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC BIT(21)
#define MVPP2_RXD_L4_CSUM_OK BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
#define MVPP2_RXD_L4_TCP BIT(25)
#define MVPP2_RXD_L4_UDP BIT(26)
#define MVPP2_RXD_L3_IP4 BIT(28)
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)
struct mvpp2_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting */
	u8 packet_offset;	/* the offset from the buffer beginning */
	u8 phys_txq;		/* destination queue ID */
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* physical addr of transmitted buffer */
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use) */
};
struct mvpp2_rx_desc {
	u32 status;		/* info about received packet */
	u16 reserved1;		/* parser_info (for future use, PnC) */
	u16 data_size;		/* size of received packet in bytes */
	u32 buf_phys_addr;	/* physical address of the buffer */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON) */
	u16 reserved3;		/* csum_l4 (for future use, PnC) */
	u8 reserved4;		/* bm_qset (for future use, BM) */
	u8 reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC) */
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};
struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t phys;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
struct mvpp2_prs_shadow {
	bool valid;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE physical base address */
	dma_addr_t phys_addr;

	/* Ports using BM pool */
	u32 port_map;
};
struct mvpp2_buff_hdr {
	u32 next_buff_phys_addr;
	u32 next_buff_virt_addr;
	u16 byte_count;
	u16 info;
	u8 reserved1;		/* bm_qset (for future use, BM) */
};

/* Buffer header info bits */
#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
#define MVPP2_B_HDR_INFO_LAST_OFFS 12
#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
#define MVPP2_B_HDR_INFO_IS_LAST(info) \
	(((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}
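/* Usage sketch (editor's note): all shared-register accesses in this file
 * go through the two accessors above, e.g. a read-modify-write of a RXQ
 * configuration register:
 *
 *	u32 val = mvpp2_read(priv, MVPP2_RXQ_CONFIG_REG(rxq));
 *	val &= ~MVPP2_RXQ_DISABLE_MASK;
 *	mvpp2_write(priv, MVPP2_RXQ_CONFIG_REG(rxq), val);
 */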
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = tx_desc->data_size;
	tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
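/* Worked example (editor's note): the PON port's T-CONTs occupy the first
 * MVPP2_MAX_TCONT (16) egress ports, so Ethernet port 1, logical TXQ 2
 * maps to physical TXQ (16 + 1) * 8 + 2 = 138.
 */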
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
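/* Note (editor's): the hardware stores the port map inverted - a cleared
 * enable bit means "match this port". That is why _set() writes ~ports and
 * _get() inverts again, and why mvpp2_prs_tcam_port_set() above clears the
 * port's bit when adding it. For example:
 *
 *	mvpp2_prs_tcam_port_map_set(pe, 0);	entry matches no port
 *	mvpp2_prs_tcam_port_map_set(pe, MVPP2_PRS_PORT_MASK);
 *						entry matches all ports
 */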
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}
/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
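/* Worked example (editor's note): ethertypes are matched in network byte
 * order, as they appear on the wire. For ETH_P_IP (0x0800) the two calls
 * above store 0x08 at TCAM data byte 'offset' and 0x00 at 'offset + 1',
 * each with a fully-enabled 0xff compare mask.
 */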
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
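/* Usage sketch (editor's note): a negative shift is stored as sign bit
 * plus magnitude. Stepping back over a 4-byte VLAN tag, for instance,
 * would be requested as:
 *
 *	mvpp2_prs_sram_shift_set(pe, -MVPP2_VLAN_TAG_LEN,
 *				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 *
 * which sets MVPP2_PRS_SRAM_SHIFT_SIGN_BIT and writes 4 as the value.
 */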
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */
	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
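	/* Background (editor's note): IPv4 multicast MAC addresses have the
	 * form 01:00:5e:xx:xx:xx and IPv6 multicast addresses the form
	 * 33:33:xx:xx:xx:xx, so matching only the first byte (0x01 or 0x33)
	 * is enough to steer each family to its own TCAM entry.
	 */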
	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;

		mvpp2_prs_hw_read(priv, pe);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mvpp2_prs_match_etype(pe, 0, tpid);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift 4 bytes - skip 1 vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);

	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);

	return ret;
}
/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}
/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
			&& mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);
	return ret;
}
1920 /* IPv4 header parsing for fragmentation and L4 offset */
1921 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
1922 unsigned int ri, unsigned int ri_mask)
1924 struct mvpp2_prs_entry pe;
1927 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1928 (proto != IPPROTO_IGMP))
1931 /* Fragmented packet */
1932 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1933 MVPP2_PE_LAST_FREE_TID);
1937 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1938 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1941 /* Set next lu to IPv4 */
1942 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1943 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1945 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1946 sizeof(struct iphdr) - 4,
1947 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1948 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1949 MVPP2_PRS_IPV4_DIP_AI_BIT);
1950 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
1951 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
1953 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1954 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1955 /* Unmask all ports */
1956 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1958 /* Update shadow table and hw entry */
1959 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1960 mvpp2_prs_hw_write(priv, &pe);
1962 /* Not fragmented packet */
1963 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1964 MVPP2_PE_LAST_FREE_TID);
1969 /* Clear ri before updating */
1970 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1971 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1972 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1974 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
1975 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
1977 /* Update shadow table and hw entry */
1978 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1979 mvpp2_prs_hw_write(priv, &pe);
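/* Note: two TCAM entries are installed per protocol - the first flags any
 * matching packet as fragmented (MVPP2_PRS_RI_IP_FRAG_MASK), the second
 * matches only packets whose IPv4 flags/fragment-offset bytes (TCAM data
 * bytes 2 and 3) are zero. Example call, as used by mvpp2_prs_ip4_init():
 *
 *	mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
 *			    MVPP2_PRS_RI_L4_PROTO_MASK);
 */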
1984 /* IPv4 L3 multicast or broadcast */
1985 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
1987 struct mvpp2_prs_entry pe;
1990 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1991 MVPP2_PE_LAST_FREE_TID);
1995 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1996 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2000 case MVPP2_PRS_L3_MULTI_CAST:
2001 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2002 MVPP2_PRS_IPV4_MC_MASK);
2003 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2004 MVPP2_PRS_RI_L3_ADDR_MASK);
2006 case MVPP2_PRS_L3_BROAD_CAST:
2007 mask = MVPP2_PRS_IPV4_BC_MASK;
2008 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2009 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2010 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2011 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2012 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2013 MVPP2_PRS_RI_L3_ADDR_MASK);
2019 /* Finished: go to flowid generation */
2020 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2021 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2023 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2024 MVPP2_PRS_IPV4_DIP_AI_BIT);
2025 /* Unmask all ports */
2026 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2028 /* Update shadow table and hw entry */
2029 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2030 mvpp2_prs_hw_write(priv, &pe);
2035 /* Set entries for protocols over IPv6 */
2036 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2037 unsigned int ri, unsigned int ri_mask)
2039 struct mvpp2_prs_entry pe;
2042 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2043 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2046 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2047 MVPP2_PE_LAST_FREE_TID);
2051 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2052 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2055 /* Finished: go to flowid generation */
2056 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2057 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2058 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2059 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2060 sizeof(struct ipv6hdr) - 6,
2061 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2063 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2064 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2065 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2066 /* Unmask all ports */
2067 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2070 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2071 mvpp2_prs_hw_write(priv, &pe);
2076 /* IPv6 L3 multicast entry */
2077 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2079 struct mvpp2_prs_entry pe;
2082 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2085 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2086 MVPP2_PE_LAST_FREE_TID);
2090 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2091 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2094 /* Finished: go to flowid generation */
2095 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2096 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2097 MVPP2_PRS_RI_L3_ADDR_MASK);
2098 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2099 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2100 /* Shift back to IPv6 NH */
2101 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2103 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2104 MVPP2_PRS_IPV6_MC_MASK);
2105 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2106 /* Unmask all ports */
2107 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2109 /* Update shadow table and hw entry */
2110 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2111 mvpp2_prs_hw_write(priv, &pe);
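/* Arithmetic behind the -18 shift above (illustrative): the L2 stage
 * leaves the read pointer 2 + 8 + 16 == 26 bytes past the start of the
 * ethertype (see the "Skip DIP" shift in mvpp2_prs_etype_init()), and
 * 26 - 18 == 8 == ethertype length (2) + next-header offset (6), so the
 * pointer lands back on the IPv6 NH byte.
 */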
2116 /* Parser per-port initialization */
2117 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2118 int lu_max, int offset)
2123 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2124 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2125 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2126 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2128 /* Set maximum number of loops for packet received from port */
2129 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2130 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2131 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2132 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2134 /* Set initial offset for packet header extraction for the first
2135 * searching loop
2136 */
2137 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2138 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2139 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2140 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
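/* Example usage, as in mvpp2_prs_default_init() below:
 *
 *	mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *			       MVPP2_PRS_PORT_LU_MAX, 0);
 *
 * i.e. start every lookup chain at the Marvell Header stage, allow the
 * maximum number of lookup iterations, and extract headers from offset 0.
 */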
2143 /* Default flow entries initialization for all ports */
2144 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2146 struct mvpp2_prs_entry pe;
2149 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2150 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2151 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2152 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2154 /* Mask all ports */
2155 mvpp2_prs_tcam_port_map_set(&pe, 0);
2158 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2159 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2161 /* Update shadow table and hw entry */
2162 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2163 mvpp2_prs_hw_write(priv, &pe);
2167 /* Set default entry for Marvell Header field */
2168 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2170 struct mvpp2_prs_entry pe;
2172 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2174 pe.index = MVPP2_PE_MH_DEFAULT;
2175 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2176 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2177 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2178 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2180 /* Unmask all ports */
2181 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2183 /* Update shadow table and hw entry */
2184 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2185 mvpp2_prs_hw_write(priv, &pe);
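/* Illustrative: MVPP2_MH_SIZE is the 2-byte Marvell Header preceding the
 * DA/SA, so this default entry simply skips it for every port and hands
 * the packet to the MAC lookup stage.
 */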
2188 /* Set default entries (place holder) for promiscuous, non-promiscuous and
2189 * multicast MAC addresses
2190 */
2191 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2193 struct mvpp2_prs_entry pe;
2195 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2197 /* Non-promiscuous mode for all ports - DROP unknown packets */
2198 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2199 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2201 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2202 MVPP2_PRS_RI_DROP_MASK);
2203 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2204 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2206 /* Unmask all ports */
2207 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2209 /* Update shadow table and hw entry */
2210 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2211 mvpp2_prs_hw_write(priv, &pe);
2213 /* place holders only - no ports */
2214 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2215 mvpp2_prs_mac_promisc_set(priv, 0, false);
2216 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2217 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2220 /* Set default entries for various types of dsa packets */
2221 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2223 struct mvpp2_prs_entry pe;
2225 /* Untagged EDSA entry - place holder */
2226 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2229 /* Tagged EDSA entry - place holder */
2230 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2232 /* Untagged DSA entry - place holder */
2233 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2236 /* Tagged DSA entry - place holder */
2237 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2239 /* Untagged EDSA ethertype entry - place holder */
2240 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2241 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2243 /* Tagged EDSA ethertype entry - place holder */
2244 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2245 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2247 /* Untagged DSA ethertype entry */
2248 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2249 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2251 /* Tagged DSA ethertype entry */
2252 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2253 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2255 /* Set default entry, in case DSA or EDSA tag not found */
2256 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2257 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2258 pe.index = MVPP2_PE_DSA_DEFAULT;
2259 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2262 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2263 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2265 /* Clear all sram ai bits for next iteration */
2266 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2268 /* Unmask all ports */
2269 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2271 mvpp2_prs_hw_write(priv, &pe);
2274 /* Match basic ethertypes */
2275 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2277 struct mvpp2_prs_entry pe;
2280 /* Ethertype: PPPoE */
2281 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2282 MVPP2_PE_LAST_FREE_TID);
2286 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2287 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2290 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2292 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2293 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2294 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2295 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2296 MVPP2_PRS_RI_PPPOE_MASK);
2298 /* Update shadow table and hw entry */
2299 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2300 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2301 priv->prs_shadow[pe.index].finish = false;
2302 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2303 MVPP2_PRS_RI_PPPOE_MASK);
2304 mvpp2_prs_hw_write(priv, &pe);
2306 /* Ethertype: ARP */
2307 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2308 MVPP2_PE_LAST_FREE_TID);
2312 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2313 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2316 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2318 /* Generate flow in the next iteration */
2319 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2320 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2321 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2322 MVPP2_PRS_RI_L3_PROTO_MASK);
2324 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2326 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2328 /* Update shadow table and hw entry */
2329 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2330 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2331 priv->prs_shadow[pe.index].finish = true;
2332 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2333 MVPP2_PRS_RI_L3_PROTO_MASK);
2334 mvpp2_prs_hw_write(priv, &pe);
2336 /* Ethertype: LBTD */
2337 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2338 MVPP2_PE_LAST_FREE_TID);
2342 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2343 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2346 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2348 /* Generate flow in the next iteration */
2349 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2350 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2351 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2352 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2353 MVPP2_PRS_RI_CPU_CODE_MASK |
2354 MVPP2_PRS_RI_UDF3_MASK);
2356 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2358 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2360 /* Update shadow table and hw entry */
2361 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2362 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2363 priv->prs_shadow[pe.index].finish = true;
2364 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2365 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2366 MVPP2_PRS_RI_CPU_CODE_MASK |
2367 MVPP2_PRS_RI_UDF3_MASK);
2368 mvpp2_prs_hw_write(priv, &pe);
2370 /* Ethertype: IPv4 without options */
2371 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2372 MVPP2_PE_LAST_FREE_TID);
2376 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2377 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2380 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2381 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2382 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2383 MVPP2_PRS_IPV4_HEAD_MASK |
2384 MVPP2_PRS_IPV4_IHL_MASK);
2386 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2387 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2388 MVPP2_PRS_RI_L3_PROTO_MASK);
2389 /* Skip eth_type + 4 bytes of IP header */
2390 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2391 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2393 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2395 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2397 /* Update shadow table and hw entry */
2398 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2399 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2400 priv->prs_shadow[pe.index].finish = false;
2401 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2402 MVPP2_PRS_RI_L3_PROTO_MASK);
2403 mvpp2_prs_hw_write(priv, &pe);
2405 /* Ethertype: IPv4 with options */
2406 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2407 MVPP2_PE_LAST_FREE_TID);
2413 /* Clear tcam data before updating */
2414 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2415 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2417 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2418 MVPP2_PRS_IPV4_HEAD,
2419 MVPP2_PRS_IPV4_HEAD_MASK);
2421 /* Clear ri before updating */
2422 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2423 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2424 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2425 MVPP2_PRS_RI_L3_PROTO_MASK);
2427 /* Update shadow table and hw entry */
2428 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2429 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2430 priv->prs_shadow[pe.index].finish = false;
2431 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2432 MVPP2_PRS_RI_L3_PROTO_MASK);
2433 mvpp2_prs_hw_write(priv, &pe);
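/* Note (illustrative): the "IPv4 with options" entry above intentionally
 * reuses the still-loaded "without options" entry rather than starting
 * from a fresh memset() - only the TCAM byte holding the version/IHL
 * field and the result-info words are rewritten; the ethertype match,
 * shifts and L3 offset are inherited.
 */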
2435 /* Ethertype: IPv6 without options */
2436 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2437 MVPP2_PE_LAST_FREE_TID);
2441 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2442 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2445 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2447 /* Skip DIP of IPv6 header */
2448 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2449 MVPP2_MAX_L3_ADDR_SIZE,
2450 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2451 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2452 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2453 MVPP2_PRS_RI_L3_PROTO_MASK);
2455 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2457 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2459 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2460 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2461 priv->prs_shadow[pe.index].finish = false;
2462 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2463 MVPP2_PRS_RI_L3_PROTO_MASK);
2464 mvpp2_prs_hw_write(priv, &pe);
2466 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2467 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2468 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2469 pe.index = MVPP2_PE_ETH_TYPE_UN;
2471 /* Unmask all ports */
2472 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2474 /* Generate flow in the next iteration */
2475 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2476 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2477 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2478 MVPP2_PRS_RI_L3_PROTO_MASK);
2479 /* Set L3 offset even if it's unknown L3 */
2480 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2482 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2484 /* Update shadow table and hw entry */
2485 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2486 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2487 priv->prs_shadow[pe.index].finish = true;
2488 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2489 MVPP2_PRS_RI_L3_PROTO_MASK);
2490 mvpp2_prs_hw_write(priv, &pe);
2495 /* Configure vlan entries and detect up to 2 successive VLAN tags.
2496 * Possible options:
2497 * 0x8100, 0x88A8
2498 * 0x8100, 0x8100
2499 * 0x8100
2500 * 0x88A8
2501 */
2502 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2504 struct mvpp2_prs_entry pe;
2507 priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2508 MVPP2_PRS_DBL_VLANS_MAX, sizeof(bool),
2510 if (!priv->prs_double_vlans)
2513 /* Double VLAN: 0x8100, 0x88A8 */
2514 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2515 MVPP2_PRS_PORT_MASK);
2519 /* Double VLAN: 0x8100, 0x8100 */
2520 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2521 MVPP2_PRS_PORT_MASK);
2525 /* Single VLAN: 0x88a8 */
2526 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2527 MVPP2_PRS_PORT_MASK);
2531 /* Single VLAN: 0x8100 */
2532 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2533 MVPP2_PRS_PORT_MASK);
2537 /* Set default double vlan entry */
2538 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2539 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2540 pe.index = MVPP2_PE_VLAN_DBL;
2542 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2543 /* Clear ai for next iterations */
2544 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2545 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2546 MVPP2_PRS_RI_VLAN_MASK);
2548 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2549 MVPP2_PRS_DBL_VLAN_AI_BIT);
2550 /* Unmask all ports */
2551 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2553 /* Update shadow table and hw entry */
2554 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2555 mvpp2_prs_hw_write(priv, &pe);
2557 /* Set default vlan none entry */
2558 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2559 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2560 pe.index = MVPP2_PE_VLAN_NONE;
2562 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2563 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2564 MVPP2_PRS_RI_VLAN_MASK);
2566 /* Unmask all ports */
2567 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2569 /* Update shadow table and hw entry */
2570 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2571 mvpp2_prs_hw_write(priv, &pe);
2576 /* Set entries for PPPoE ethertype */
2577 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2579 struct mvpp2_prs_entry pe;
2582 /* IPv4 over PPPoE with options */
2583 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2584 MVPP2_PE_LAST_FREE_TID);
2588 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2589 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2592 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2594 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2595 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2596 MVPP2_PRS_RI_L3_PROTO_MASK);
2597 /* Skip eth_type + 4 bytes of IP header */
2598 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2599 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2601 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2603 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2605 /* Update shadow table and hw entry */
2606 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2607 mvpp2_prs_hw_write(priv, &pe);
2609 /* IPv4 over PPPoE without options */
2610 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2611 MVPP2_PE_LAST_FREE_TID);
2617 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2618 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2619 MVPP2_PRS_IPV4_HEAD_MASK |
2620 MVPP2_PRS_IPV4_IHL_MASK);
2622 /* Clear ri before updating */
2623 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2624 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2625 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2626 MVPP2_PRS_RI_L3_PROTO_MASK);
2628 /* Update shadow table and hw entry */
2629 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2630 mvpp2_prs_hw_write(priv, &pe);
2632 /* IPv6 over PPPoE */
2633 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2634 MVPP2_PE_LAST_FREE_TID);
2638 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2639 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2642 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2644 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2645 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2646 MVPP2_PRS_RI_L3_PROTO_MASK);
2647 /* Skip eth_type + 4 bytes of IPv6 header */
2648 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2649 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2651 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2653 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2655 /* Update shadow table and hw entry */
2656 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2657 mvpp2_prs_hw_write(priv, &pe);
2659 /* Non-IP over PPPoE */
2660 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2661 MVPP2_PE_LAST_FREE_TID);
2665 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2666 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2669 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2670 MVPP2_PRS_RI_L3_PROTO_MASK);
2672 /* Finished: go to flowid generation */
2673 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2674 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2675 /* Set L3 offset even if it's unknown L3 */
2676 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2678 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2680 /* Update shadow table and hw entry */
2681 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2682 mvpp2_prs_hw_write(priv, &pe);
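/* Illustrative: PPP_IP (0x21) and PPP_IPV6 (0x57) are PPP protocol
 * numbers from <uapi/linux/ppp_defs.h>; they play the role the ethertype
 * plays in mvpp2_prs_etype_init(), matched just after the PPPoE header.
 */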
2687 /* Initialize entries for IPv4 */
2688 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2690 struct mvpp2_prs_entry pe;
2693 /* Set entries for TCP, UDP and IGMP over IPv4 */
2694 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2695 MVPP2_PRS_RI_L4_PROTO_MASK);
2699 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2700 MVPP2_PRS_RI_L4_PROTO_MASK);
2704 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2705 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2706 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2707 MVPP2_PRS_RI_CPU_CODE_MASK |
2708 MVPP2_PRS_RI_UDF3_MASK);
2712 /* IPv4 Broadcast */
2713 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2717 /* IPv4 Multicast */
2718 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2722 /* Default IPv4 entry for unknown protocols */
2723 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2724 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2725 pe.index = MVPP2_PE_IP4_PROTO_UN;
2727 /* Set next lu to IPv4 */
2728 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2729 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2731 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2732 sizeof(struct iphdr) - 4,
2733 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2734 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2735 MVPP2_PRS_IPV4_DIP_AI_BIT);
2736 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2737 MVPP2_PRS_RI_L4_PROTO_MASK);
2739 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2740 /* Unmask all ports */
2741 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2743 /* Update shadow table and hw entry */
2744 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2745 mvpp2_prs_hw_write(priv, &pe);
2747 /* Default IPv4 entry for unicast address */
2748 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2749 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2750 pe.index = MVPP2_PE_IP4_ADDR_UN;
2752 /* Finished: go to flowid generation */
2753 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2754 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2755 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2756 MVPP2_PRS_RI_L3_ADDR_MASK);
2758 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2759 MVPP2_PRS_IPV4_DIP_AI_BIT);
2760 /* Unmask all ports */
2761 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2763 /* Update shadow table and hw entry */
2764 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2765 mvpp2_prs_hw_write(priv, &pe);
2770 /* Initialize entries for IPv6 */
2771 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2773 struct mvpp2_prs_entry pe;
2776 /* Set entries for TCP, UDP and ICMP over IPv6 */
2777 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
2778 MVPP2_PRS_RI_L4_TCP,
2779 MVPP2_PRS_RI_L4_PROTO_MASK);
2783 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
2784 MVPP2_PRS_RI_L4_UDP,
2785 MVPP2_PRS_RI_L4_PROTO_MASK);
2789 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
2790 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2791 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2792 MVPP2_PRS_RI_CPU_CODE_MASK |
2793 MVPP2_PRS_RI_UDF3_MASK);
2797 /* IPv4 is the last header. This is a similar case to protocol 6 (TCP) or 17 (UDP) */
2798 /* Result Info: UDF7=1, DS lite */
2799 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
2800 MVPP2_PRS_RI_UDF7_IP6_LITE,
2801 MVPP2_PRS_RI_UDF7_MASK);
2805 /* IPv6 multicast */
2806 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2810 /* Entry for checking hop limit */
2811 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2812 MVPP2_PE_LAST_FREE_TID);
2816 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2817 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2820 /* Finished: go to flowid generation */
2821 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2822 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2823 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2824 MVPP2_PRS_RI_DROP_MASK,
2825 MVPP2_PRS_RI_L3_PROTO_MASK |
2826 MVPP2_PRS_RI_DROP_MASK);
2828 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2829 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2830 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2832 /* Update shadow table and hw entry */
2833 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2834 mvpp2_prs_hw_write(priv, &pe);
2836 /* Default IPv6 entry for unknown protocols */
2837 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2838 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2839 pe.index = MVPP2_PE_IP6_PROTO_UN;
2841 /* Finished: go to flowid generation */
2842 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2843 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2844 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2845 MVPP2_PRS_RI_L4_PROTO_MASK);
2846 /* Set L4 offset relative to our current place */
2847 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2848 sizeof(struct ipv6hdr) - 4,
2849 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2851 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2852 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2853 /* Unmask all ports */
2854 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2856 /* Update shadow table and hw entry */
2857 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2858 mvpp2_prs_hw_write(priv, &pe);
2860 /* Default IPv6 entry for unknown ext protocols */
2861 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2862 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2863 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2865 /* Finished: go to flowid generation */
2866 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2867 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2868 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2869 MVPP2_PRS_RI_L4_PROTO_MASK);
2871 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2872 MVPP2_PRS_IPV6_EXT_AI_BIT);
2873 /* Unmask all ports */
2874 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2876 /* Update shadow table and hw entry */
2877 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2878 mvpp2_prs_hw_write(priv, &pe);
2880 /* Default IPv6 entry for unicast address */
2881 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2882 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2883 pe.index = MVPP2_PE_IP6_ADDR_UN;
2885 /* Finished: go to IPv6 again */
2886 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2887 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2888 MVPP2_PRS_RI_L3_ADDR_MASK);
2889 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2890 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2891 /* Shift back to IPv6 NH */
2892 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2894 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2895 /* Unmask all ports */
2896 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2898 /* Update shadow table and hw entry */
2899 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2900 mvpp2_prs_hw_write(priv, &pe);
2905 /* Parser default initialization */
2906 static int mvpp2_prs_default_init(struct platform_device *pdev,
2911 /* Enable tcam table */
2912 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2914 /* Clear all tcam and sram entries */
2915 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2916 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2917 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2918 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2920 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2921 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2922 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2925 /* Invalidate all tcam entries */
2926 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2927 mvpp2_prs_hw_inv(priv, index);
2929 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2930 sizeof(struct mvpp2_prs_shadow),
2932 if (!priv->prs_shadow)
2935 /* Always start from lookup = 0 */
2936 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2937 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2938 MVPP2_PRS_PORT_LU_MAX, 0);
2940 mvpp2_prs_def_flow_init(priv);
2942 mvpp2_prs_mh_init(priv);
2944 mvpp2_prs_mac_init(priv);
2946 mvpp2_prs_dsa_init(priv);
2948 err = mvpp2_prs_etype_init(priv);
2952 err = mvpp2_prs_vlan_init(pdev, priv);
2956 err = mvpp2_prs_pppoe_init(priv);
2960 err = mvpp2_prs_ip6_init(priv);
2964 err = mvpp2_prs_ip4_init(priv);
2971 /* Compare MAC DA with tcam entry data */
2972 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2973 const u8 *da, unsigned char *mask)
2975 unsigned char tcam_byte, tcam_mask;
2978 for (index = 0; index < ETH_ALEN; index++) {
2979 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2980 if (tcam_mask != mask[index])
2983 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2990 /* Find tcam entry with matched pair <MAC DA, port> */
2991 static struct mvpp2_prs_entry *
2992 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2993 unsigned char *mask, int udf_type)
2995 struct mvpp2_prs_entry *pe;
2998 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3001 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3003 /* Go through all entries with MVPP2_PRS_LU_MAC */
3004 for (tid = MVPP2_PE_FIRST_FREE_TID;
3005 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3006 unsigned int entry_pmap;
3008 if (!priv->prs_shadow[tid].valid ||
3009 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3010 (priv->prs_shadow[tid].udf != udf_type))
3014 mvpp2_prs_hw_read(priv, pe);
3015 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3017 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3026 /* Update parser's mac da entry */
3027 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3028 const u8 *da, bool add)
3030 struct mvpp2_prs_entry *pe;
3031 unsigned int pmap, len, ri;
3032 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3035 /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3036 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3037 MVPP2_PRS_UDF_MAC_DEF);
3044 /* Create new TCAM entry */
3045 /* Find first range mac entry */
3046 for (tid = MVPP2_PE_FIRST_FREE_TID;
3047 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3048 if (priv->prs_shadow[tid].valid &&
3049 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3050 (priv->prs_shadow[tid].udf ==
3051 MVPP2_PRS_UDF_MAC_RANGE))
3054 /* Go through all entries from first to last */
3055 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3060 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3063 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3066 /* Mask all ports */
3067 mvpp2_prs_tcam_port_map_set(pe, 0);
3070 /* Update port mask */
3071 mvpp2_prs_tcam_port_set(pe, port, add);
3073 /* Invalidate the entry if no ports are left enabled */
3074 pmap = mvpp2_prs_tcam_port_map_get(pe);
3080 mvpp2_prs_hw_inv(priv, pe->index);
3081 priv->prs_shadow[pe->index].valid = false;
3086 /* Continue - set next lookup */
3087 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3089 /* Set match on DA */
3092 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3094 /* Set result info bits */
3095 if (is_broadcast_ether_addr(da))
3096 ri = MVPP2_PRS_RI_L2_BCAST;
3097 else if (is_multicast_ether_addr(da))
3098 ri = MVPP2_PRS_RI_L2_MCAST;
3100 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3102 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3103 MVPP2_PRS_RI_MAC_ME_MASK);
3104 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3105 MVPP2_PRS_RI_MAC_ME_MASK);
3107 /* Shift to ethertype */
3108 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3109 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3111 /* Update shadow table and hw entry */
3112 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3113 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3114 mvpp2_prs_hw_write(priv, pe);
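/* Example (illustrative): enabling and later disabling reception of a
 * unicast DA on port 0 - the second call clears the port bit and, once
 * the port map is empty, invalidates the TCAM entry altogether:
 *
 *	mvpp2_prs_mac_da_accept(priv, 0, dev->dev_addr, true);
 *	mvpp2_prs_mac_da_accept(priv, 0, dev->dev_addr, false);
 */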
3121 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3123 struct mvpp2_port *port = netdev_priv(dev);
3126 /* Remove old parser entry */
3127 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3132 /* Add new parser entry */
3133 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3137 /* Set addr in the device */
3138 ether_addr_copy(dev->dev_addr, da);
3143 /* Delete all the port's simple (non-range) multicast entries */
3144 static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3146 struct mvpp2_prs_entry pe;
3149 for (tid = MVPP2_PE_FIRST_FREE_TID;
3150 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3151 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3153 if (!priv->prs_shadow[tid].valid ||
3154 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3155 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3158 /* Only simple mac entries */
3160 mvpp2_prs_hw_read(priv, &pe);
3162 /* Read mac addr from entry */
3163 for (index = 0; index < ETH_ALEN; index++)
3164 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3167 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3168 /* Delete this entry */
3169 mvpp2_prs_mac_da_accept(priv, port, da, false);
3173 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3176 case MVPP2_TAG_TYPE_EDSA:
3177 /* Add port to EDSA entries */
3178 mvpp2_prs_dsa_tag_set(priv, port, true,
3179 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3180 mvpp2_prs_dsa_tag_set(priv, port, true,
3181 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3182 /* Remove port from DSA entries */
3183 mvpp2_prs_dsa_tag_set(priv, port, false,
3184 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3185 mvpp2_prs_dsa_tag_set(priv, port, false,
3186 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3189 case MVPP2_TAG_TYPE_DSA:
3190 /* Add port to DSA entries */
3191 mvpp2_prs_dsa_tag_set(priv, port, true,
3192 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3193 mvpp2_prs_dsa_tag_set(priv, port, true,
3194 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3195 /* Remove port from EDSA entries */
3196 mvpp2_prs_dsa_tag_set(priv, port, false,
3197 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3198 mvpp2_prs_dsa_tag_set(priv, port, false,
3199 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3202 case MVPP2_TAG_TYPE_MH:
3203 case MVPP2_TAG_TYPE_NONE:
3204 /* Remove port from EDSA and DSA entries */
3205 mvpp2_prs_dsa_tag_set(priv, port, false,
3206 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3207 mvpp2_prs_dsa_tag_set(priv, port, false,
3208 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3209 mvpp2_prs_dsa_tag_set(priv, port, false,
3210 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3211 mvpp2_prs_dsa_tag_set(priv, port, false,
3212 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3216 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3223 /* Set prs flow for the port */
3224 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3226 struct mvpp2_prs_entry *pe;
3229 pe = mvpp2_prs_flow_find(port->priv, port->id);
3231 /* Such an entry does not exist */
3233 /* Go through all entries from last to first */
3234 tid = mvpp2_prs_tcam_first_free(port->priv,
3235 MVPP2_PE_LAST_FREE_TID,
3236 MVPP2_PE_FIRST_FREE_TID);
3240 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3244 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3248 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3249 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3251 /* Update shadow table */
3252 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3255 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3256 mvpp2_prs_hw_write(port->priv, pe);
3262 /* Classifier configuration routines */
3264 /* Update classification flow table registers */
3265 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3266 struct mvpp2_cls_flow_entry *fe)
3268 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3269 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3270 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3271 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3274 /* Update classification lookup table register */
3275 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3276 struct mvpp2_cls_lookup_entry *le)
3280 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3281 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3282 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
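/* Worked example (illustrative): for way == 0 and lkpid == 2 the value
 * written to MVPP2_CLS_LKP_INDEX_REG is (0 << 6) | 2 == 2, selecting row
 * 2 of way 0 in the lookup ID decoding table before le->data is stored.
 */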
3285 /* Classifier default initialization */
3286 static void mvpp2_cls_init(struct mvpp2 *priv)
3288 struct mvpp2_cls_lookup_entry le;
3289 struct mvpp2_cls_flow_entry fe;
3292 /* Enable classifier */
3293 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3295 /* Clear classifier flow table */
3296 memset(&fe.data, 0, sizeof(fe.data));
3297 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3299 mvpp2_cls_flow_write(priv, &fe);
3302 /* Clear classifier lookup table */
3304 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3307 mvpp2_cls_lookup_write(priv, &le);
3310 mvpp2_cls_lookup_write(priv, &le);
3314 static void mvpp2_cls_port_config(struct mvpp2_port *port)
3316 struct mvpp2_cls_lookup_entry le;
3319 /* Set way for the port */
3320 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3321 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3322 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3324 /* Pick the entry to be accessed in lookup ID decoding table
3325 * according to the way and lkpid.
3326 */
3327 le.lkpid = port->id;
3331 /* Set initial CPU queue for receiving packets */
3332 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3333 le.data |= port->first_rxq;
3335 /* Disable classification engines */
3336 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3338 /* Update lookup ID table entry */
3339 mvpp2_cls_lookup_write(port->priv, &le);
3342 /* Set CPU queue number for oversize packets */
3343 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3347 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3348 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3350 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3351 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3353 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3354 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3355 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3358 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
3360 if (likely(pool->frag_size <= PAGE_SIZE))
3361 return netdev_alloc_frag(pool->frag_size);
3363 return kmalloc(pool->frag_size, GFP_ATOMIC);
3366 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3368 if (likely(pool->frag_size <= PAGE_SIZE))
3369 skb_free_frag(data);
3370 else
3371 kfree(data);
3372 }
3374 /* Buffer Manager configuration routines */
3377 static int mvpp2_bm_pool_create(struct platform_device *pdev,
3379 struct mvpp2_bm_pool *bm_pool, int size)
3384 size_bytes = sizeof(u32) * size;
3385 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
3386 &bm_pool->phys_addr,
3388 if (!bm_pool->virt_addr)
3391 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3392 MVPP2_BM_POOL_PTR_ALIGN)) {
3393 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
3394 bm_pool->phys_addr);
3395 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3396 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3400 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3401 bm_pool->phys_addr);
3402 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3404 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3405 val |= MVPP2_BM_START_MASK;
3406 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3408 bm_pool->type = MVPP2_BM_FREE;
3409 bm_pool->size = size;
3410 bm_pool->pkt_size = 0;
3411 bm_pool->buf_num = 0;
3416 /* Set pool buffer size */
3417 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3418 struct mvpp2_bm_pool *bm_pool,
3423 bm_pool->buf_size = buf_size;
3425 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3426 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
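/* Worked example (illustrative): pool buffer sizes are programmed in
 * units of 1 << MVPP2_POOL_BUF_SIZE_OFFSET == 32 bytes, so a requested
 * buf_size of 1700 is rounded up to ALIGN(1700, 32) == 1728.
 */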
3429 /* Free all buffers from the pool */
3430 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3431 struct mvpp2_bm_pool *bm_pool)
3435 for (i = 0; i < bm_pool->buf_num; i++) {
3436 dma_addr_t buf_phys_addr;
3437 unsigned long vaddr;
3439 /* Get buffer virtual address (indirect access) */
3440 buf_phys_addr = mvpp2_read(priv,
3441 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3442 vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
3444 dma_unmap_single(dev, buf_phys_addr,
3445 bm_pool->buf_size, DMA_FROM_DEVICE);
3450 mvpp2_frag_free(bm_pool, (void *)vaddr);
3453 /* Update BM driver with number of buffers removed from pool */
3454 bm_pool->buf_num -= i;
3458 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3460 struct mvpp2_bm_pool *bm_pool)
3464 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3465 if (bm_pool->buf_num) {
3466 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3470 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3471 val |= MVPP2_BM_STOP_MASK;
3472 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3474 dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
3476 bm_pool->phys_addr);
3480 static int mvpp2_bm_pools_init(struct platform_device *pdev,
3484 struct mvpp2_bm_pool *bm_pool;
3486 /* Create all pools with maximum size */
3487 size = MVPP2_BM_POOL_SIZE_MAX;
3488 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3489 bm_pool = &priv->bm_pools[i];
3491 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3493 goto err_unroll_pools;
3494 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3499 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3500 for (i = i - 1; i >= 0; i--)
3501 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3505 static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3509 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3510 /* Mask all BM interrupts */
3511 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3512 /* Clear BM cause register */
3513 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3516 /* Allocate and initialize BM pools */
3517 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3518 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3519 if (!priv->bm_pools)
3522 err = mvpp2_bm_pools_init(pdev, priv);
3528 /* Attach long pool to rxq */
3529 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3530 int lrxq, int long_pool)
3535 /* Get queue physical ID */
3536 prxq = port->rxqs[lrxq]->id;
3538 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3539 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3540 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
3541 MVPP2_RXQ_POOL_LONG_MASK);
3543 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3546 /* Attach short pool to rxq */
3547 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3548 int lrxq, int short_pool)
3553 /* Get queue physical ID */
3554 prxq = port->rxqs[lrxq]->id;
3556 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3557 val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3558 val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
3559 MVPP2_RXQ_POOL_SHORT_MASK);
3561 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3564 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
3565 struct mvpp2_bm_pool *bm_pool,
3566 dma_addr_t *buf_phys_addr,
3569 dma_addr_t phys_addr;
3572 data = mvpp2_frag_alloc(bm_pool);
3576 phys_addr = dma_map_single(port->dev->dev.parent, data,
3577 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3579 if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
3580 mvpp2_frag_free(bm_pool, data);
3583 *buf_phys_addr = phys_addr;
3588 /* Set pool number in a BM cookie */
3589 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3593 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3594 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3599 /* Get pool number from a BM cookie */
3600 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
3602 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
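/* Worked example (illustrative, assuming MVPP2_BM_COOKIE_POOL_OFFS == 8):
 *
 *	mvpp2_bm_cookie_pool_set(0, 3);   returns 0x00000300
 *	mvpp2_bm_cookie_pool_get(0x300);  returns 3
 */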
3605 /* Release buffer to BM */
3606 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3607 dma_addr_t buf_phys_addr,
3608 unsigned long buf_virt_addr)
3610 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
3611 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
3614 /* Release multicast buffer */
3615 static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
3616 dma_addr_t buf_phys_addr,
3617 unsigned long buf_virt_addr,
3622 val |= (mc_id & MVPP2_BM_MC_ID_MASK);
3623 mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
3625 mvpp2_bm_pool_put(port, pool,
3626 buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
3630 /* Refill BM pool */
3631 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3632 dma_addr_t phys_addr,
3633 unsigned long cookie)
3635 int pool = mvpp2_bm_cookie_pool_get(bm);
3637 mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
3640 /* Allocate buffers for the pool */
3641 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3642 struct mvpp2_bm_pool *bm_pool, int buf_num)
3644 int i, buf_size, total_size;
3645 dma_addr_t phys_addr;
3648 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3649 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3652 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3653 netdev_err(port->dev,
3654 "cannot allocate %d buffers for pool %d\n",
3655 buf_num, bm_pool->id);
3659 for (i = 0; i < buf_num; i++) {
3660 buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
3664 mvpp2_bm_pool_put(port, bm_pool->id, phys_addr,
3665 (unsigned long)buf);
3668 /* Update BM driver with number of buffers added to pool */
3669 bm_pool->buf_num += i;
3671 netdev_dbg(port->dev,
3672 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3673 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3674 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3676 netdev_dbg(port->dev,
3677 "%s pool %d: %d of %d buffers added\n",
3678 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3679 bm_pool->id, i, buf_num);
3683 /* Notify the driver that BM pool is being used as a specific type and return the
3684 * pool pointer on success
3686 static struct mvpp2_bm_pool *
3687 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3690 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3693 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3694 netdev_err(port->dev, "mixing pool types is forbidden\n");
3698 if (new_pool->type == MVPP2_BM_FREE)
3699 new_pool->type = type;
3701 /* Allocate buffers in case BM pool is used as long pool, but packet
3702 * size doesn't match MTU or BM pool hasn't been used yet
3704 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
3705 (new_pool->pkt_size == 0)) {
3708 /* Set default buffer number or free all the buffers in case
3709 * the pool is not empty
3711 pkts_num = new_pool->buf_num;
3713 pkts_num = type == MVPP2_BM_SWF_LONG ?
3714 MVPP2_BM_LONG_BUF_NUM :
3715 MVPP2_BM_SHORT_BUF_NUM;
3717 mvpp2_bm_bufs_free(port->dev->dev.parent,
3718 port->priv, new_pool);
3720 new_pool->pkt_size = pkt_size;
3721 new_pool->frag_size =
3722 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
3723 MVPP2_SKB_SHINFO_SIZE;
3725 /* Allocate buffers for this pool */
3726 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
3727 if (num != pkts_num) {
3728 WARN(1, "pool %d: %d of %d allocated\n",
3729 new_pool->id, num, pkts_num);
3734 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3735 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3740 /* Initialize pools for swf */
3741 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3745 if (!port->pool_long) {
3747 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
3750 if (!port->pool_long)
3753 port->pool_long->port_map |= (1 << port->id);
3755 for (rxq = 0; rxq < rxq_number; rxq++)
3756 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
3759 if (!port->pool_short) {
3761 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
3763 MVPP2_BM_SHORT_PKT_SIZE);
3764 if (!port->pool_short)
3767 port->pool_short->port_map |= (1 << port->id);
3769 for (rxq = 0; rxq < rxq_number; rxq++)
3770 mvpp2_rxq_short_pool_set(port, rxq,
3771 port->pool_short->id);
3777 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3779 struct mvpp2_port *port = netdev_priv(dev);
3780 struct mvpp2_bm_pool *port_pool = port->pool_long;
3781 int num, pkts_num = port_pool->buf_num;
3782 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3784 /* Update BM pool with new buffer size */
3785 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
3786 if (port_pool->buf_num) {
3787 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3791 port_pool->pkt_size = pkt_size;
3792 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
3793 MVPP2_SKB_SHINFO_SIZE;
3794 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
3795 if (num != pkts_num) {
3796 WARN(1, "pool %d: %d of %d allocated\n",
3797 port_pool->id, num, pkts_num);
3801 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
3802 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
3804 netdev_update_features(dev);
3808 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
3810 int cpu, cpu_mask = 0;
3812 for_each_present_cpu(cpu)
3813 cpu_mask |= 1 << cpu;
3814 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3815 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3818 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
3820 int cpu, cpu_mask = 0;
3822 for_each_present_cpu(cpu)
3823 cpu_mask |= 1 << cpu;
3824 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3825 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
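/* Illustrative: on a dual-CPU SoC such as the Armada 375,
 * for_each_present_cpu() yields cpu_mask == 0x3, so a single register
 * write enables (or disables) the per-port interrupt for both CPUs.
 */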
3828 /* Mask the current CPU's Rx/Tx interrupts */
3829 static void mvpp2_interrupts_mask(void *arg)
3831 struct mvpp2_port *port = arg;
3833 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3836 /* Unmask the current CPU's Rx/Tx interrupts */
3837 static void mvpp2_interrupts_unmask(void *arg)
3839 struct mvpp2_port *port = arg;
3841 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3842 (MVPP2_CAUSE_MISC_SUM_MASK |
3843 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3846 /* Port configuration routines */
3848 static void mvpp2_port_mii_set(struct mvpp2_port *port)
3852 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3854 switch (port->phy_interface) {
3855 case PHY_INTERFACE_MODE_SGMII:
3856 val |= MVPP2_GMAC_INBAND_AN_MASK;
3858 case PHY_INTERFACE_MODE_RGMII:
3859 val |= MVPP2_GMAC_PORT_RGMII_MASK;
3861 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3864 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3867 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
3871 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3872 val |= MVPP2_GMAC_FC_ADV_EN;
3873 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3876 static void mvpp2_port_enable(struct mvpp2_port *port)
3880 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3881 val |= MVPP2_GMAC_PORT_EN_MASK;
3882 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3883 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3886 static void mvpp2_port_disable(struct mvpp2_port *port)
3890 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3891 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3892 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3895 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
3896 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
3900 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
3901 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3902 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3905 /* Configure loopback port */
3906 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
3910 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3912 if (port->speed == 1000)
3913 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
3914 else
3915 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
3917 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3918 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
3919 else
3920 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
3922 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3925 static void mvpp2_port_reset(struct mvpp2_port *port)
3929 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3930 ~MVPP2_GMAC_PORT_RESET_MASK;
3931 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3933 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3934 MVPP2_GMAC_PORT_RESET_MASK)
3938 /* Change maximum receive size of the port */
3939 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
3943 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3944 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3945 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
3946 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
3947 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
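/* Worked example (illustrative): the max-RX-size field is programmed in
 * units of 2 bytes, so for pkt_size == 1518 + MVPP2_MH_SIZE the value
 * written is (1520 - 2) / 2 == 759.
 */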
3950 /* Set defaults to the MVPP2 port */
3951 static void mvpp2_defaults_set(struct mvpp2_port *port)
3953 int tx_port_num, val, queue, ptxq, lrxq;
3955 /* Configure port to loopback if needed */
3956 if (port->flags & MVPP2_F_LOOPBACK)
3957 mvpp2_port_loopback_set(port);
3959 /* Update TX FIFO MIN Threshold */
3960 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3961 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3962 /* Min. TX threshold must be less than minimal packet length */
3963 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3964 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3966 /* Disable Legacy WRR, Disable EJP, Release from reset */
3967 tx_port_num = mvpp2_egress_port(port);
3968 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3970 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3972 /* Close bandwidth for all queues */
3973 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3974 ptxq = mvpp2_txq_phys(port->id, queue);
3975 mvpp2_write(port->priv,
3976 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3979 /* Set refill period to 1 usec, refill tokens
3980 * and bucket size to maximum
3981 */
3982 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
3983 port->priv->tclk / USEC_PER_SEC);
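/* Annotation: tclk / USEC_PER_SEC is the number of clock cycles per
 * microsecond, so the scheduler's refill period becomes exactly 1 usec;
 * e.g. with a 250 MHz tclk the register is written with 250.
 */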
3984 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3985 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3986 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3987 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3988 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3989 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3990 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3992 /* Set MaximumLowLatencyPacketSize value to 256 */
3993 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3994 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3995 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3997 /* Enable Rx cache snoop */
3998 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3999 queue = port->rxqs[lrxq]->id;
4000 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4001 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
4002 MVPP2_SNOOP_BUF_HDR_MASK;
4003 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4004 }
4006 /* By default, mask all interrupts to all present CPUs */
4007 mvpp2_interrupts_disable(port);
4010 /* Enable/disable receiving packets */
4011 static void mvpp2_ingress_enable(struct mvpp2_port *port)
4016 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4017 queue = port->rxqs[lrxq]->id;
4018 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4019 val &= ~MVPP2_RXQ_DISABLE_MASK;
4020 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4024 static void mvpp2_ingress_disable(struct mvpp2_port *port)
4029 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4030 queue = port->rxqs[lrxq]->id;
4031 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4032 val |= MVPP2_RXQ_DISABLE_MASK;
4033 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4037 /* Enable transmit via physical egress queue
4038 * - HW starts taking descriptors from DRAM
4039 */
4040 static void mvpp2_egress_enable(struct mvpp2_port *port)
4044 int tx_port_num = mvpp2_egress_port(port);
4046 /* Enable all initialized TXs. */
4048 for (queue = 0; queue < txq_number; queue++) {
4049 struct mvpp2_tx_queue *txq = port->txqs[queue];
4051 if (txq->descs != NULL)
4052 qmap |= (1 << queue);
4055 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4056 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4059 /* Disable transmit via physical egress queue
4060 * - HW doesn't take descriptors from DRAM
4061 */
4062 static void mvpp2_egress_disable(struct mvpp2_port *port)
4066 int tx_port_num = mvpp2_egress_port(port);
4068 /* Issue stop command for active channels only */
4069 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4070 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4071 MVPP2_TXP_SCHED_ENQ_MASK;
4073 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4074 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4076 /* Wait for all Tx activity to terminate. */
4077 delay = 0;
4078 do {
4079 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4080 netdev_warn(port->dev,
4081 "Tx stop timed out, status=0x%08x\n",
4088 /* Check the port TX Command register to verify that all
4089 * Tx queues have stopped
4090 */
4091 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4092 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4095 /* Rx descriptors helper methods */
4097 /* Get number of Rx descriptors occupied by received packets */
4099 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4101 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4103 return val & MVPP2_RXQ_OCCUPIED_MASK;
4106 /* Update Rx queue status with the number of occupied and available
4107 * Rx descriptor slots.
4110 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4111 int used_count, int free_count)
4113 /* Decrement the number of used descriptors and increment the
4114 * number of free descriptors.
4115 */
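/* Annotation (worked example): processing 5 descriptors and returning
 * 5 buffers gives val = 5 | (5 << MVPP2_RXQ_NUM_NEW_OFFSET), so a
 * single register write updates both counters at once.
 */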
4116 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4118 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4121 /* Get pointer to next RX descriptor to be processed by SW */
4122 static inline struct mvpp2_rx_desc *
4123 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4125 int rx_desc = rxq->next_desc_to_proc;
4127 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4128 prefetch(rxq->descs + rxq->next_desc_to_proc);
4129 return rxq->descs + rx_desc;
4132 /* Set rx queue offset */
4133 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4134 int prxq, int offset)
4138 /* Convert offset from bytes to units of 32 bytes */
4139 offset = offset >> 5;
4141 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4142 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4145 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4146 MVPP2_RXQ_PACKET_OFFSET_MASK);
4148 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4151 /* Obtain BM cookie information from descriptor */
4152 static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
4154 int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4155 MVPP2_RXD_BM_POOL_ID_OFFS;
4156 int cpu = smp_processor_id();
4158 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4159 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
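/* Annotation: the cookie packs two byte-wide fields into one word,
 * e.g. pool 2 seen on CPU 1 yields
 * (2 << MVPP2_BM_COOKIE_POOL_OFFS) | (1 << MVPP2_BM_COOKIE_CPU_OFFS).
 */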
4162 /* Tx descriptors helper methods */
4164 /* Get number of Tx descriptors waiting to be transmitted by HW */
4165 static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
4166 struct mvpp2_tx_queue *txq)
4170 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4171 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4173 return val & MVPP2_TXQ_PENDING_MASK;
4176 /* Get pointer to next Tx descriptor to be processed (send) by HW */
4177 static struct mvpp2_tx_desc *
4178 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4180 int tx_desc = txq->next_desc_to_proc;
4182 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4183 return txq->descs + tx_desc;
4186 /* Update HW with number of aggregated Tx descriptors to be sent */
4187 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4189 /* aggregated access - relevant TXQ number is written in TX desc */
4190 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4194 /* Check if there are enough free descriptors in aggregated txq.
4195 * If not, update the number of occupied descriptors and repeat the check.
4196 */
4197 static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4198 struct mvpp2_tx_queue *aggr_txq, int num)
4200 if ((aggr_txq->count + num) > aggr_txq->size) {
4201 /* Update number of occupied aggregated Tx descriptors */
4202 int cpu = smp_processor_id();
4203 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4205 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
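/* Annotation: the software count can be stale because the HW drains
 * the per-CPU aggregated queue asynchronously; the value re-read from
 * the status register above feeds the final check below.
 */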
4208 if ((aggr_txq->count + num) > aggr_txq->size)
4209 return -ENOMEM;
4211 return 0;
4212 }
4214 /* Reserved Tx descriptors allocation request */
4215 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4216 struct mvpp2_tx_queue *txq, int num)
4220 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4221 mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
4223 val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
4225 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4228 /* Check if there are enough reserved descriptors for transmission.
4229 * If not, request chunk of reserved descriptors and check again.
4230 */
4231 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4232 struct mvpp2_tx_queue *txq,
4233 struct mvpp2_txq_pcpu *txq_pcpu,
4236 int req, cpu, desc_count;
4238 if (txq_pcpu->reserved_num >= num)
4239 return 0;
4241 /* Not enough descriptors reserved! Update the reserved descriptor
4242 * count and check again.
4243 */
4245 desc_count = 0;
4246 /* Compute total of used descriptors */
4247 for_each_present_cpu(cpu) {
4248 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4250 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4251 desc_count += txq_pcpu_aux->count;
4252 desc_count += txq_pcpu_aux->reserved_num;
4255 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4258 if ((desc_count + req) >
4259 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4260 return -ENOMEM;
4262 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4264 /* OK, the descriptor count has been updated: check again. */
4265 if (txq_pcpu->reserved_num < num)
4266 return -ENOMEM;
4268 return 0;
4269 }
4270 /* Release the last allocated Tx descriptor. Useful to handle DMA
4271 * mapping failures in the Tx path.
4272 */
4273 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4275 if (txq->next_desc_to_proc == 0)
4276 txq->next_desc_to_proc = txq->last_desc - 1;
4277 else
4278 txq->next_desc_to_proc--;
4281 /* Set Tx descriptors fields relevant for CSUM calculation */
4282 static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4283 int ip_hdr_len, int l4_proto)
4287 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4288 * G_L4_chk, L4_type required only for checksum calculation
4289 */
4290 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4291 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4292 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4294 if (l3_proto == swab16(ETH_P_IP)) {
4295 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4296 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4297 } else {
4298 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4299 }
4301 if (l4_proto == IPPROTO_TCP) {
4302 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4303 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4304 } else if (l4_proto == IPPROTO_UDP) {
4305 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4306 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4307 } else {
4308 command |= MVPP2_TXD_L4_CSUM_NOT;
4309 }
4311 return command;
4312 }
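/* Annotation (worked example): for TCP over IPv4 behind a standard
 * 14-byte Ethernet header, the caller passes l3_offs = 14 and
 * ip_hdr_len = 5 (IHL, in 32-bit words), so the function above returns
 * (14 << MVPP2_TXD_L3_OFF_SHIFT) | (5 << MVPP2_TXD_IP_HLEN_SHIFT) with
 * the IPv4-csum-disable, IPv6, UDP and no-L4-csum bits all cleared.
 */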
4314 /* Get number of sent descriptors and decrement counter.
4315 * The number of sent descriptors is returned.
4318 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4319 struct mvpp2_tx_queue *txq)
4323 /* Reading status reg resets transmitted descriptor counter */
4324 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
4326 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4327 MVPP2_TRANSMITTED_COUNT_OFFSET;
4330 static void mvpp2_txq_sent_counter_clear(void *arg)
4332 struct mvpp2_port *port = arg;
4335 for (queue = 0; queue < txq_number; queue++) {
4336 int id = port->txqs[queue]->id;
4338 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
4342 /* Set max sizes for Tx queues */
4343 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4346 int txq, tx_port_num;
4348 mtu = port->pkt_size * 8;
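/* Annotation: the multiplication by 8 suggests the TXP scheduler MTU
 * register counts bits rather than bytes (an assumption inferred from
 * the code, not from documentation).
 */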
4349 if (mtu > MVPP2_TXP_MTU_MAX)
4350 mtu = MVPP2_TXP_MTU_MAX;
4352 /* Workaround for wrong Token bucket update: Set MTU value = 3*real MTU value */
4353 mtu = 3 * mtu;
4355 /* Indirect access to registers */
4356 tx_port_num = mvpp2_egress_port(port);
4357 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4360 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4361 val &= ~MVPP2_TXP_MTU_MAX;
4362 val |= mtu;
4363 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4365 /* TXP token size and all TXQs token size must be larger than MTU */
4366 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4367 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4368 if (size < mtu) {
4369 size = mtu;
4370 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4371 val |= size;
4372 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4373 }
4375 for (txq = 0; txq < txq_number; txq++) {
4376 val = mvpp2_read(port->priv,
4377 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4378 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4380 if (size < mtu) {
4381 size = mtu;
4382 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4383 val |= size;
4384 mvpp2_write(port->priv,
4385 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4386 val);
4387 }
4388 }
4389 }
4391 /* Set the number of packets that will be received before Rx interrupt
4392 * will be generated by HW.
4393 */
4394 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4395 struct mvpp2_rx_queue *rxq)
4397 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4398 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
4400 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4401 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
4402 rxq->pkts_coal);
4403 }
4405 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
4407 u64 tmp = (u64)clk_hz * usec;
4409 do_div(tmp, USEC_PER_SEC);
4411 return tmp > U32_MAX ? U32_MAX : tmp;
4414 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
4416 u64 tmp = (u64)cycles * USEC_PER_SEC;
4418 do_div(tmp, clk_hz);
4420 return tmp > U32_MAX ? U32_MAX : tmp;
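/* Annotation (worked example): with a 250 MHz clock, 100 usec maps to
 * 250000000 * 100 / 1000000 = 25000 cycles, and 25000 cycles map back
 * to 100 usec; both helpers saturate at U32_MAX instead of wrapping.
 */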
4423 /* Set the time delay in usec before Rx interrupt */
4424 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4425 struct mvpp2_rx_queue *rxq)
4427 unsigned long freq = port->priv->tclk;
4428 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4430 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
4431 rxq->time_coal =
4432 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
4434 /* re-evaluate to get actual register value */
4435 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4438 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4441 /* Free Tx queue skbuffs */
4442 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4443 struct mvpp2_tx_queue *txq,
4444 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4448 for (i = 0; i < num; i++) {
4449 struct mvpp2_txq_pcpu_buf *tx_buf =
4450 txq_pcpu->buffs + txq_pcpu->txq_get_index;
4452 dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
4453 tx_buf->size, DMA_TO_DEVICE);
4454 if (tx_buf->skb)
4455 dev_kfree_skb_any(tx_buf->skb);
4457 mvpp2_txq_inc_get(txq_pcpu);
4461 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4464 int queue = fls(cause) - 1;
4466 return port->rxqs[queue];
4469 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4472 int queue = fls(cause) - 1;
4474 return port->txqs[queue];
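/* Annotation: the cause argument is a bitmap of pending queues and
 * fls() - 1 selects the highest-numbered one, e.g. cause = 0x6 (queues
 * 1 and 2 pending) yields queue 2; the caller clears that bit and
 * iterates, so queues are serviced from highest to lowest.
 */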
4477 /* Handle end of transmission */
4478 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4479 struct mvpp2_txq_pcpu *txq_pcpu)
4481 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4484 if (txq_pcpu->cpu != smp_processor_id())
4485 netdev_err(port->dev, "wrong cpu at the end of Tx processing\n");
4487 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4490 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4492 txq_pcpu->count -= tx_done;
4494 if (netif_tx_queue_stopped(nq))
4495 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4496 netif_tx_wake_queue(nq);
4499 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4501 struct mvpp2_tx_queue *txq;
4502 struct mvpp2_txq_pcpu *txq_pcpu;
4503 unsigned int tx_todo = 0;
4505 while (cause) {
4506 txq = mvpp2_get_tx_queue(port, cause);
4507 if (!txq)
4508 break;
4510 txq_pcpu = this_cpu_ptr(txq->pcpu);
4512 if (txq_pcpu->count) {
4513 mvpp2_txq_done(port, txq, txq_pcpu);
4514 tx_todo += txq_pcpu->count;
4517 cause &= ~(1 << txq->log_id);
4518 }
4520 return tx_todo;
4521 }
4522 /* Rx/Tx queue initialization/cleanup methods */
4524 /* Allocate and initialize descriptors for aggr TXQ */
4525 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4526 struct mvpp2_tx_queue *aggr_txq,
4527 int desc_num, int cpu,
4530 /* Allocate memory for TX descriptors */
4531 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4532 desc_num * MVPP2_DESC_ALIGNED_SIZE,
4533 &aggr_txq->descs_phys, GFP_KERNEL);
4534 if (!aggr_txq->descs)
4535 return -ENOMEM;
4537 aggr_txq->last_desc = aggr_txq->size - 1;
4539 /* Workaround: the aggr TXQ is not reset, so pick up the current index from HW */
4540 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4541 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4543 /* Set Tx descriptors queue starting address */
4544 /* indirect access */
4545 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
4546 aggr_txq->descs_phys);
4547 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4552 /* Create a specified Rx queue */
4553 static int mvpp2_rxq_init(struct mvpp2_port *port,
4554 struct mvpp2_rx_queue *rxq)
4557 rxq->size = port->rx_ring_size;
4559 /* Allocate memory for RX descriptors */
4560 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4561 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4562 &rxq->descs_phys, GFP_KERNEL);
4563 if (!rxq->descs)
4564 return -ENOMEM;
4566 rxq->last_desc = rxq->size - 1;
4568 /* Zero occupied and non-occupied counters - direct access */
4569 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4571 /* Set Rx descriptors queue starting address - indirect access */
4572 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4573 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
4574 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4575 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4578 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4580 /* Set coalescing pkts and time */
4581 mvpp2_rx_pkts_coal_set(port, rxq);
4582 mvpp2_rx_time_coal_set(port, rxq);
4584 /* Add number of descriptors ready for receiving packets */
4585 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4590 /* Push packets received by the RXQ to BM pool */
4591 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4592 struct mvpp2_rx_queue *rxq)
4596 rx_received = mvpp2_rxq_received(port, rxq->id);
4600 for (i = 0; i < rx_received; i++) {
4601 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4602 u32 bm = mvpp2_bm_cookie_build(rx_desc);
4604 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
4605 rx_desc->buf_cookie);
4607 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4610 /* Cleanup Rx queue */
4611 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4612 struct mvpp2_rx_queue *rxq)
4614 mvpp2_rxq_drop_pkts(port, rxq);
4617 dma_free_coherent(port->dev->dev.parent,
4618 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4624 rxq->next_desc_to_proc = 0;
4625 rxq->descs_phys = 0;
4627 /* Clear Rx descriptors queue starting address and size;
4628 * free descriptor number
4629 */
4630 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4631 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4632 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4633 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4636 /* Create and initialize a Tx queue */
4637 static int mvpp2_txq_init(struct mvpp2_port *port,
4638 struct mvpp2_tx_queue *txq)
4641 int cpu, desc, desc_per_txq, tx_port_num;
4642 struct mvpp2_txq_pcpu *txq_pcpu;
4644 txq->size = port->tx_ring_size;
4646 /* Allocate memory for Tx descriptors */
4647 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4648 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4649 &txq->descs_phys, GFP_KERNEL);
4650 if (!txq->descs)
4651 return -ENOMEM;
4653 txq->last_desc = txq->size - 1;
4655 /* Set Tx descriptors queue starting address - indirect access */
4656 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4657 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
4658 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4659 MVPP2_TXQ_DESC_SIZE_MASK);
4660 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4661 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4662 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4663 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4664 val &= ~MVPP2_TXQ_PENDING_MASK;
4665 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4667 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
4668 * for each existing TXQ.
4669 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
4670 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
4671 */
4672 desc_per_txq = 16;
4673 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4674 (txq->log_id * desc_per_txq);
4676 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4677 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4678 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
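/* Annotation (worked example): with desc_per_txq = 16 and MVPP2_MAX_TXQ
 * queues per port (8 in this driver), port 1 / logical queue 2 gets
 * desc = 1 * 8 * 16 + 2 * 16 = 160 as its prefetch buffer base.
 */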
4680 /* WRR / EJP configuration - indirect access */
4681 tx_port_num = mvpp2_egress_port(port);
4682 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4684 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4685 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4686 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4687 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4688 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4690 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4691 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4694 for_each_present_cpu(cpu) {
4695 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4696 txq_pcpu->size = txq->size;
4697 txq_pcpu->buffs = kmalloc(txq_pcpu->size *
4698 sizeof(struct mvpp2_txq_pcpu_buf),
4700 if (!txq_pcpu->buffs)
4703 txq_pcpu->count = 0;
4704 txq_pcpu->reserved_num = 0;
4705 txq_pcpu->txq_put_index = 0;
4706 txq_pcpu->txq_get_index = 0;
4712 for_each_present_cpu(cpu) {
4713 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4714 kfree(txq_pcpu->buffs);
4717 dma_free_coherent(port->dev->dev.parent,
4718 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4719 txq->descs, txq->descs_phys);
4724 /* Free allocated TXQ resources */
4725 static void mvpp2_txq_deinit(struct mvpp2_port *port,
4726 struct mvpp2_tx_queue *txq)
4728 struct mvpp2_txq_pcpu *txq_pcpu;
4731 for_each_present_cpu(cpu) {
4732 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4733 kfree(txq_pcpu->buffs);
4737 dma_free_coherent(port->dev->dev.parent,
4738 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4739 txq->descs, txq->descs_phys);
4743 txq->next_desc_to_proc = 0;
4744 txq->descs_phys = 0;
4746 /* Set minimum bandwidth for disabled TXQs */
4747 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4749 /* Set Tx descriptors queue starting address and size */
4750 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4751 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4752 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4755 /* Cleanup Tx ports */
4756 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4758 struct mvpp2_txq_pcpu *txq_pcpu;
4759 int delay, pending, cpu;
4762 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4763 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4764 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4765 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4767 /* The napi queue has been stopped so wait for all packets
4768 * to be transmitted.
4769 */
4770 delay = 0;
4771 do {
4772 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4773 netdev_warn(port->dev,
4774 "port %d: cleaning queue %d timed out\n",
4775 port->id, txq->log_id);
4776 break;
4777 }
4778 mdelay(1);
4779 delay++;
4781 pending = mvpp2_txq_pend_desc_num_get(port, txq);
4782 } while (pending);
4784 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4785 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4787 for_each_present_cpu(cpu) {
4788 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4790 /* Release all packets */
4791 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4794 txq_pcpu->count = 0;
4795 txq_pcpu->txq_put_index = 0;
4796 txq_pcpu->txq_get_index = 0;
4800 /* Cleanup all Tx queues */
4801 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4803 struct mvpp2_tx_queue *txq;
4807 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4809 /* Reset Tx ports and delete Tx queues */
4810 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4811 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4813 for (queue = 0; queue < txq_number; queue++) {
4814 txq = port->txqs[queue];
4815 mvpp2_txq_clean(port, txq);
4816 mvpp2_txq_deinit(port, txq);
4819 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4821 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4822 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4825 /* Cleanup all Rx queues */
4826 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4830 for (queue = 0; queue < rxq_number; queue++)
4831 mvpp2_rxq_deinit(port, port->rxqs[queue]);
4834 /* Init all Rx queues for port */
4835 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4839 for (queue = 0; queue < rxq_number; queue++) {
4840 err = mvpp2_rxq_init(port, port->rxqs[queue]);
4847 mvpp2_cleanup_rxqs(port);
4851 /* Init all tx queues for port */
4852 static int mvpp2_setup_txqs(struct mvpp2_port *port)
4854 struct mvpp2_tx_queue *txq;
4857 for (queue = 0; queue < txq_number; queue++) {
4858 txq = port->txqs[queue];
4859 err = mvpp2_txq_init(port, txq);
4864 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4868 mvpp2_cleanup_txqs(port);
4872 /* The callback for per-port interrupt */
4873 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4875 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
4877 mvpp2_interrupts_disable(port);
4879 napi_schedule(&port->napi);
4885 static void mvpp2_link_event(struct net_device *dev)
4887 struct mvpp2_port *port = netdev_priv(dev);
4888 struct phy_device *phydev = dev->phydev;
4889 int status_change = 0;
4893 if ((port->speed != phydev->speed) ||
4894 (port->duplex != phydev->duplex)) {
4897 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4898 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4899 MVPP2_GMAC_CONFIG_GMII_SPEED |
4900 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4901 MVPP2_GMAC_AN_SPEED_EN |
4902 MVPP2_GMAC_AN_DUPLEX_EN);
4905 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4907 if (phydev->speed == SPEED_1000)
4908 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4909 else if (phydev->speed == SPEED_100)
4910 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4912 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4914 port->duplex = phydev->duplex;
4915 port->speed = phydev->speed;
4919 if (phydev->link != port->link) {
4920 if (!phydev->link) {
4925 port->link = phydev->link;
4929 if (status_change) {
4931 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4932 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4933 MVPP2_GMAC_FORCE_LINK_DOWN);
4934 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4935 mvpp2_egress_enable(port);
4936 mvpp2_ingress_enable(port);
4938 mvpp2_ingress_disable(port);
4939 mvpp2_egress_disable(port);
4941 phy_print_status(phydev);
4945 static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4949 if (!port_pcpu->timer_scheduled) {
4950 port_pcpu->timer_scheduled = true;
4951 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
4952 hrtimer_start(&port_pcpu->tx_done_timer, interval,
4953 HRTIMER_MODE_REL_PINNED);
4957 static void mvpp2_tx_proc_cb(unsigned long data)
4959 struct net_device *dev = (struct net_device *)data;
4960 struct mvpp2_port *port = netdev_priv(dev);
4961 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4962 unsigned int tx_todo, cause;
4964 if (!netif_running(dev))
4965 return;
4966 port_pcpu->timer_scheduled = false;
4968 /* Process all the Tx queues */
4969 cause = (1 << txq_number) - 1;
4970 tx_todo = mvpp2_tx_done(port, cause);
4972 /* Set the timer in case not all the packets were processed */
4973 if (tx_todo)
4974 mvpp2_timer_set(port_pcpu);
4977 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4979 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
4980 struct mvpp2_port_pcpu,
4983 tasklet_schedule(&port_pcpu->tx_done_tasklet);
4985 return HRTIMER_NORESTART;
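/* Annotation: the hrtimer callback runs in hard interrupt context, so
 * it only schedules the tasklet; the actual Tx-done processing in
 * mvpp2_tx_proc_cb() then runs from softirq context where it is safe
 * to take longer.
 */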
4988 /* Main RX/TX processing routines */
4990 /* Display more error info */
4991 static void mvpp2_rx_error(struct mvpp2_port *port,
4992 struct mvpp2_rx_desc *rx_desc)
4994 u32 status = rx_desc->status;
4996 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4997 case MVPP2_RXD_ERR_CRC:
4998 netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
4999 status, rx_desc->data_size);
5000 break;
5001 case MVPP2_RXD_ERR_OVERRUN:
5002 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
5003 status, rx_desc->data_size);
5004 break;
5005 case MVPP2_RXD_ERR_RESOURCE:
5006 netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
5007 status, rx_desc->data_size);
5012 /* Handle RX checksum offload */
5013 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5014 struct sk_buff *skb)
5016 if (((status & MVPP2_RXD_L3_IP4) &&
5017 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
5018 (status & MVPP2_RXD_L3_IP6))
5019 if (((status & MVPP2_RXD_L4_UDP) ||
5020 (status & MVPP2_RXD_L4_TCP)) &&
5021 (status & MVPP2_RXD_L4_CSUM_OK)) {
5022 skb->csum = 0;
5023 skb->ip_summed = CHECKSUM_UNNECESSARY;
5024 return;
5025 }
5027 skb->ip_summed = CHECKSUM_NONE;
5030 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5031 static int mvpp2_rx_refill(struct mvpp2_port *port,
5032 struct mvpp2_bm_pool *bm_pool, u32 bm)
5034 dma_addr_t phys_addr;
5037 /* No recycle or too many buffers are in use, so allocate a new skb */
5038 buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
5039 if (!buf)
5040 return -ENOMEM;
5042 mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf);
5047 /* Handle tx checksum */
5048 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5050 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5054 if (skb->protocol == htons(ETH_P_IP)) {
5055 struct iphdr *ip4h = ip_hdr(skb);
5057 /* Calculate IPv4 checksum and L4 checksum */
5058 ip_hdr_len = ip4h->ihl;
5059 l4_proto = ip4h->protocol;
5060 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5061 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5063 /* Read l4_protocol from one of IPv6 extra headers */
5064 if (skb_network_header_len(skb) > 0)
5065 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5066 l4_proto = ip6h->nexthdr;
5068 return MVPP2_TXD_L4_CSUM_NOT;
5071 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5072 skb->protocol, ip_hdr_len, l4_proto);
5075 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5078 static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
5079 struct mvpp2_rx_desc *rx_desc)
5081 struct mvpp2_buff_hdr *buff_hdr;
5082 struct sk_buff *skb;
5083 u32 rx_status = rx_desc->status;
5084 dma_addr_t buff_phys_addr;
5085 unsigned long buff_virt_addr;
5086 dma_addr_t buff_phys_addr_next;
5087 unsigned long buff_virt_addr_next;
5091 pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5092 MVPP2_RXD_BM_POOL_ID_OFFS;
5093 buff_phys_addr = rx_desc->buf_phys_addr;
5094 buff_virt_addr = rx_desc->buf_cookie;
5097 skb = (struct sk_buff *)buff_virt_addr;
5098 buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
5100 mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
5102 buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
5103 buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
5105 /* Release buffer */
5106 mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
5107 buff_virt_addr, mc_id);
5109 buff_phys_addr = buff_phys_addr_next;
5110 buff_virt_addr = buff_virt_addr_next;
5112 } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
5115 /* Main rx processing */
5116 static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5117 struct mvpp2_rx_queue *rxq)
5119 struct net_device *dev = port->dev;
5125 /* Get number of received packets and clamp the to-do */
5126 rx_received = mvpp2_rxq_received(port, rxq->id);
5127 if (rx_todo > rx_received)
5128 rx_todo = rx_received;
5130 while (rx_done < rx_todo) {
5131 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5132 struct mvpp2_bm_pool *bm_pool;
5133 struct sk_buff *skb;
5134 unsigned int frag_size;
5135 dma_addr_t phys_addr;
5137 int pool, rx_bytes, err;
5141 rx_status = rx_desc->status;
5142 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
5143 phys_addr = rx_desc->buf_phys_addr;
5144 data = (void *)(uintptr_t)rx_desc->buf_cookie;
5146 bm = mvpp2_bm_cookie_build(rx_desc);
5147 pool = mvpp2_bm_cookie_pool_get(bm);
5148 bm_pool = &port->priv->bm_pools[pool];
5149 /* Check if buffer header is used */
5150 if (rx_status & MVPP2_RXD_BUF_HDR) {
5151 mvpp2_buff_hdr_rx(port, rx_desc);
5155 /* In case of an error, release the requested buffer pointer
5156 * to the Buffer Manager. This request process is controlled
5157 * by the hardware, and the information about the buffer is
5158 * carried in the RX descriptor.
5159 */
5160 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5161 err_drop_frame:
5162 dev->stats.rx_errors++;
5163 mvpp2_rx_error(port, rx_desc);
5164 /* Return the buffer to the pool */
5166 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
5167 rx_desc->buf_cookie);
5168 continue;
5169 }
5171 if (bm_pool->frag_size > PAGE_SIZE)
5172 frag_size = 0;
5173 else
5174 frag_size = bm_pool->frag_size;
5176 skb = build_skb(data, frag_size);
5177 if (!skb) {
5178 netdev_warn(port->dev, "skb build failed\n");
5179 goto err_drop_frame;
5180 }
5182 err = mvpp2_rx_refill(port, bm_pool, bm);
5183 if (err) {
5184 netdev_err(port->dev, "failed to refill BM pools\n");
5185 goto err_drop_frame;
5186 }
5188 dma_unmap_single(dev->dev.parent, phys_addr,
5189 bm_pool->buf_size, DMA_FROM_DEVICE);
5191 rcvd_pkts++;
5192 rcvd_bytes += rx_bytes;
5194 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
5195 skb_put(skb, rx_bytes);
5196 skb->protocol = eth_type_trans(skb, dev);
5197 mvpp2_rx_csum(port, rx_status, skb);
5199 napi_gro_receive(&port->napi, skb);
5200 }
5202 if (rcvd_pkts) {
5203 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5205 u64_stats_update_begin(&stats->syncp);
5206 stats->rx_packets += rcvd_pkts;
5207 stats->rx_bytes += rcvd_bytes;
5208 u64_stats_update_end(&stats->syncp);
5211 /* Update Rx queue management counters */
5212 wmb();
5213 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
5215 return rx_done;
5216 }
5218 static inline void
5219 tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
5220 struct mvpp2_tx_desc *desc)
5222 dma_unmap_single(dev, desc->buf_phys_addr,
5223 desc->data_size, DMA_TO_DEVICE);
5224 mvpp2_txq_desc_put(txq);
5227 /* Handle tx fragmentation processing */
5228 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5229 struct mvpp2_tx_queue *aggr_txq,
5230 struct mvpp2_tx_queue *txq)
5232 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5233 struct mvpp2_tx_desc *tx_desc;
5235 dma_addr_t buf_phys_addr;
5237 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5238 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5239 void *addr = page_address(frag->page.p) + frag->page_offset;
5241 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5242 tx_desc->phys_txq = txq->id;
5243 tx_desc->data_size = frag->size;
5245 buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
5248 if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
5249 mvpp2_txq_desc_put(txq);
5253 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5254 tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
5256 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5257 /* Last descriptor */
5258 tx_desc->command = MVPP2_TXD_L_DESC;
5259 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5261 /* Descriptor in the middle: Not First, Not Last */
5262 tx_desc->command = 0;
5263 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5270 /* Release all descriptors that were used to map fragments of
5271 * this packet, as well as the corresponding DMA mappings
5272 */
5273 for (i = i - 1; i >= 0; i--) {
5274 tx_desc = txq->descs + i;
5275 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5281 /* Main tx processing */
5282 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5284 struct mvpp2_port *port = netdev_priv(dev);
5285 struct mvpp2_tx_queue *txq, *aggr_txq;
5286 struct mvpp2_txq_pcpu *txq_pcpu;
5287 struct mvpp2_tx_desc *tx_desc;
5288 dma_addr_t buf_phys_addr;
5293 txq_id = skb_get_queue_mapping(skb);
5294 txq = port->txqs[txq_id];
5295 txq_pcpu = this_cpu_ptr(txq->pcpu);
5296 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5298 frags = skb_shinfo(skb)->nr_frags + 1;
5300 /* Check number of available descriptors */
5301 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5302 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5303 txq_pcpu, frags)) {
5304 frags = 0;
5305 goto out;
5306 }
5308 /* Get a descriptor for the first part of the packet */
5309 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5310 tx_desc->phys_txq = txq->id;
5311 tx_desc->data_size = skb_headlen(skb);
5313 buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
5314 tx_desc->data_size, DMA_TO_DEVICE);
5315 if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
5316 mvpp2_txq_desc_put(txq);
5317 frags = 0;
5318 goto out;
5319 }
5320 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5321 tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
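/* Annotation: the descriptor stores an aligned buffer address plus a
 * small byte offset (assuming MVPP2_TX_DESC_ALIGN is a low-bits mask
 * such as 0x1f), so unaligned skb data is transmitted without copying:
 * with a 0x1f mask, 0x10000123 splits into base 0x10000120 and
 * offset 0x03.
 */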
5323 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5325 if (frags == 1) {
5326 /* First and Last descriptor */
5327 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5328 tx_desc->command = tx_cmd;
5329 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5330 } else {
5331 /* First but not Last */
5332 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5333 tx_desc->command = tx_cmd;
5334 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5336 /* Continue with other skb fragments */
5337 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5338 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5339 frags = 0;
5340 goto out;
5341 }
5342 }
5344 txq_pcpu->reserved_num -= frags;
5345 txq_pcpu->count += frags;
5346 aggr_txq->count += frags;
5348 /* Enable transmit */
5349 wmb();
5350 mvpp2_aggr_txq_pend_desc_add(port, frags);
5352 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5353 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5355 netif_tx_stop_queue(nq);
5356 }
5357 out:
5358 if (frags > 0) {
5359 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5361 u64_stats_update_begin(&stats->syncp);
5362 stats->tx_packets++;
5363 stats->tx_bytes += skb->len;
5364 u64_stats_update_end(&stats->syncp);
5365 } else {
5366 dev->stats.tx_dropped++;
5367 dev_kfree_skb_any(skb);
5368 }
5370 /* Finalize TX processing */
5371 if (txq_pcpu->count >= txq->done_pkts_coal)
5372 mvpp2_txq_done(port, txq, txq_pcpu);
5374 /* Set the timer in case not all frags were processed */
5375 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5376 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5378 mvpp2_timer_set(port_pcpu);
5381 return NETDEV_TX_OK;
5384 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5386 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5387 netdev_err(dev, "FCS error\n");
5388 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5389 netdev_err(dev, "rx fifo overrun error\n");
5390 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5391 netdev_err(dev, "tx fifo underrun error\n");
5394 static int mvpp2_poll(struct napi_struct *napi, int budget)
5396 u32 cause_rx_tx, cause_rx, cause_misc;
5398 struct mvpp2_port *port = netdev_priv(napi->dev);
5400 /* Rx/Tx cause register
5402 * Bits 0-15: each bit indicates received packets on the Rx queue
5403 * (bit 0 is for Rx queue 0).
5405 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5406 * (bit 16 is for Tx queue 0).
5408 * Each CPU has its own Rx/Tx cause register
5409 */
5410 cause_rx_tx = mvpp2_read(port->priv,
5411 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5412 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5413 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5415 if (cause_misc) {
5416 mvpp2_cause_error(port->dev, cause_misc);
5418 /* Clear the cause register */
5419 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5420 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5421 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5422 }
5424 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5426 /* Process RX packets */
5427 cause_rx |= port->pending_cause_rx;
5428 while (cause_rx && budget > 0) {
5430 struct mvpp2_rx_queue *rxq;
5432 rxq = mvpp2_get_rx_queue(port, cause_rx);
5433 if (!rxq)
5434 break;
5436 count = mvpp2_rx(port, budget, rxq);
5437 rx_done += count;
5438 budget -= count;
5439 if (budget > 0) {
5440 /* Clear the bit associated to this Rx queue
5441 * so that next iteration will continue from
5442 * the next Rx queue.
5443 */
5444 cause_rx &= ~(1 << rxq->logic_rxq);
5450 napi_complete_done(napi, rx_done);
5452 mvpp2_interrupts_enable(port);
5454 port->pending_cause_rx = cause_rx;
5458 /* Set hw internals when starting port */
5459 static void mvpp2_start_dev(struct mvpp2_port *port)
5461 struct net_device *ndev = port->dev;
5463 mvpp2_gmac_max_rx_size_set(port);
5464 mvpp2_txp_max_tx_size_set(port);
5466 napi_enable(&port->napi);
5468 /* Enable interrupts on all CPUs */
5469 mvpp2_interrupts_enable(port);
5471 mvpp2_port_enable(port);
5472 phy_start(ndev->phydev);
5473 netif_tx_start_all_queues(port->dev);
5476 /* Set hw internals when stopping port */
5477 static void mvpp2_stop_dev(struct mvpp2_port *port)
5479 struct net_device *ndev = port->dev;
5481 /* Stop new packets from arriving to RXQs */
5482 mvpp2_ingress_disable(port);
5486 /* Disable interrupts on all CPUs */
5487 mvpp2_interrupts_disable(port);
5489 napi_disable(&port->napi);
5491 netif_carrier_off(port->dev);
5492 netif_tx_stop_all_queues(port->dev);
5494 mvpp2_egress_disable(port);
5495 mvpp2_port_disable(port);
5496 phy_stop(ndev->phydev);
5499 static int mvpp2_check_ringparam_valid(struct net_device *dev,
5500 struct ethtool_ringparam *ring)
5502 u16 new_rx_pending = ring->rx_pending;
5503 u16 new_tx_pending = ring->tx_pending;
5505 if (ring->rx_pending == 0 || ring->tx_pending == 0)
5508 if (ring->rx_pending > MVPP2_MAX_RXD)
5509 new_rx_pending = MVPP2_MAX_RXD;
5510 else if (!IS_ALIGNED(ring->rx_pending, 16))
5511 new_rx_pending = ALIGN(ring->rx_pending, 16);
5513 if (ring->tx_pending > MVPP2_MAX_TXD)
5514 new_tx_pending = MVPP2_MAX_TXD;
5515 else if (!IS_ALIGNED(ring->tx_pending, 32))
5516 new_tx_pending = ALIGN(ring->tx_pending, 32);
5518 if (ring->rx_pending != new_rx_pending) {
5519 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
5520 ring->rx_pending, new_rx_pending);
5521 ring->rx_pending = new_rx_pending;
5524 if (ring->tx_pending != new_tx_pending) {
5525 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
5526 ring->tx_pending, new_tx_pending);
5527 ring->tx_pending = new_tx_pending;
5533 static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5535 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5537 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5538 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5539 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5540 addr[0] = (mac_addr_h >> 24) & 0xFF;
5541 addr[1] = (mac_addr_h >> 16) & 0xFF;
5542 addr[2] = (mac_addr_h >> 8) & 0xFF;
5543 addr[3] = mac_addr_h & 0xFF;
5544 addr[4] = mac_addr_m & 0xFF;
5545 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
5548 static int mvpp2_phy_connect(struct mvpp2_port *port)
5550 struct phy_device *phy_dev;
5552 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5553 port->phy_interface);
5555 netdev_err(port->dev, "cannot connect to phy\n");
5558 phy_dev->supported &= PHY_GBIT_FEATURES;
5559 phy_dev->advertising = phy_dev->supported;
5568 static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5570 struct net_device *ndev = port->dev;
5572 phy_disconnect(ndev->phydev);
5575 static int mvpp2_open(struct net_device *dev)
5577 struct mvpp2_port *port = netdev_priv(dev);
5578 unsigned char mac_bcast[ETH_ALEN] = {
5579 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5582 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5584 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5587 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5588 dev->dev_addr, true);
5590 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
5593 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5595 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5598 err = mvpp2_prs_def_flow(port);
5600 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5604 /* Allocate the Rx/Tx queues */
5605 err = mvpp2_setup_rxqs(port);
5607 netdev_err(port->dev, "cannot allocate Rx queues\n");
5611 err = mvpp2_setup_txqs(port);
5613 netdev_err(port->dev, "cannot allocate Tx queues\n");
5614 goto err_cleanup_rxqs;
5617 err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5619 netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5620 goto err_cleanup_txqs;
5623 /* The link is down by default */
5624 netif_carrier_off(port->dev);
5626 err = mvpp2_phy_connect(port);
5630 /* Unmask interrupts on all CPUs */
5631 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5633 mvpp2_start_dev(port);
5638 free_irq(port->irq, port);
5640 mvpp2_cleanup_txqs(port);
5642 mvpp2_cleanup_rxqs(port);
5646 static int mvpp2_stop(struct net_device *dev)
5648 struct mvpp2_port *port = netdev_priv(dev);
5649 struct mvpp2_port_pcpu *port_pcpu;
5652 mvpp2_stop_dev(port);
5653 mvpp2_phy_disconnect(port);
5655 /* Mask interrupts on all CPUs */
5656 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5658 free_irq(port->irq, port);
5659 for_each_present_cpu(cpu) {
5660 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5662 hrtimer_cancel(&port_pcpu->tx_done_timer);
5663 port_pcpu->timer_scheduled = false;
5664 tasklet_kill(&port_pcpu->tx_done_tasklet);
5666 mvpp2_cleanup_rxqs(port);
5667 mvpp2_cleanup_txqs(port);
5672 static void mvpp2_set_rx_mode(struct net_device *dev)
5674 struct mvpp2_port *port = netdev_priv(dev);
5675 struct mvpp2 *priv = port->priv;
5676 struct netdev_hw_addr *ha;
5678 bool allmulti = dev->flags & IFF_ALLMULTI;
5680 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
5681 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
5682 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
5684 /* Remove all port->id's mcast entries */
5685 mvpp2_prs_mcast_del_all(priv, id);
5687 if (allmulti && !netdev_mc_empty(dev)) {
5688 netdev_for_each_mc_addr(ha, dev)
5689 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
5693 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
5695 struct mvpp2_port *port = netdev_priv(dev);
5696 const struct sockaddr *addr = p;
5699 if (!is_valid_ether_addr(addr->sa_data)) {
5700 err = -EADDRNOTAVAIL;
5704 if (!netif_running(dev)) {
5705 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5708 /* Reconfigure parser to accept the original MAC address */
5709 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5714 mvpp2_stop_dev(port);
5716 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5720 /* Reconfigure parser to accept the original MAC address */
5721 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5725 mvpp2_start_dev(port);
5726 mvpp2_egress_enable(port);
5727 mvpp2_ingress_enable(port);
5731 netdev_err(dev, "failed to change MAC address\n");
5735 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5737 struct mvpp2_port *port = netdev_priv(dev);
5740 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5741 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5742 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5743 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5746 if (!netif_running(dev)) {
5747 err = mvpp2_bm_update_mtu(dev, mtu);
5749 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5753 /* Reconfigure BM to the original MTU */
5754 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5759 mvpp2_stop_dev(port);
5761 err = mvpp2_bm_update_mtu(dev, mtu);
5763 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5767 /* Reconfigure BM to the original MTU */
5768 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5773 mvpp2_start_dev(port);
5774 mvpp2_egress_enable(port);
5775 mvpp2_ingress_enable(port);
5780 netdev_err(dev, "failed to change MTU\n");
5785 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5787 struct mvpp2_port *port = netdev_priv(dev);
5791 for_each_possible_cpu(cpu) {
5792 struct mvpp2_pcpu_stats *cpu_stats;
5798 cpu_stats = per_cpu_ptr(port->stats, cpu);
5800 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5801 rx_packets = cpu_stats->rx_packets;
5802 rx_bytes = cpu_stats->rx_bytes;
5803 tx_packets = cpu_stats->tx_packets;
5804 tx_bytes = cpu_stats->tx_bytes;
5805 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5807 stats->rx_packets += rx_packets;
5808 stats->rx_bytes += rx_bytes;
5809 stats->tx_packets += tx_packets;
5810 stats->tx_bytes += tx_bytes;
5813 stats->rx_errors = dev->stats.rx_errors;
5814 stats->rx_dropped = dev->stats.rx_dropped;
5815 stats->tx_dropped = dev->stats.tx_dropped;
5818 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5825 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
5827 mvpp2_link_event(dev);
5832 /* Ethtool methods */
5834 /* Set interrupt coalescing for ethtools */
5835 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5836 struct ethtool_coalesce *c)
5838 struct mvpp2_port *port = netdev_priv(dev);
5841 for (queue = 0; queue < rxq_number; queue++) {
5842 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5844 rxq->time_coal = c->rx_coalesce_usecs;
5845 rxq->pkts_coal = c->rx_max_coalesced_frames;
5846 mvpp2_rx_pkts_coal_set(port, rxq);
5847 mvpp2_rx_time_coal_set(port, rxq);
5850 for (queue = 0; queue < txq_number; queue++) {
5851 struct mvpp2_tx_queue *txq = port->txqs[queue];
5853 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5859 /* get coalescing for ethtools */
5860 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5861 struct ethtool_coalesce *c)
5863 struct mvpp2_port *port = netdev_priv(dev);
5865 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5866 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5867 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5871 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5872 struct ethtool_drvinfo *drvinfo)
5874 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5875 sizeof(drvinfo->driver));
5876 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5877 sizeof(drvinfo->version));
5878 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5879 sizeof(drvinfo->bus_info));
5882 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5883 struct ethtool_ringparam *ring)
5885 struct mvpp2_port *port = netdev_priv(dev);
5887 ring->rx_max_pending = MVPP2_MAX_RXD;
5888 ring->tx_max_pending = MVPP2_MAX_TXD;
5889 ring->rx_pending = port->rx_ring_size;
5890 ring->tx_pending = port->tx_ring_size;
5893 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5894 struct ethtool_ringparam *ring)
5896 struct mvpp2_port *port = netdev_priv(dev);
5897 u16 prev_rx_ring_size = port->rx_ring_size;
5898 u16 prev_tx_ring_size = port->tx_ring_size;
5901 err = mvpp2_check_ringparam_valid(dev, ring);
5905 if (!netif_running(dev)) {
5906 port->rx_ring_size = ring->rx_pending;
5907 port->tx_ring_size = ring->tx_pending;
5911 /* The interface is running, so we have to force a
5912 * reallocation of the queues
5913 */
5914 mvpp2_stop_dev(port);
5915 mvpp2_cleanup_rxqs(port);
5916 mvpp2_cleanup_txqs(port);
5918 port->rx_ring_size = ring->rx_pending;
5919 port->tx_ring_size = ring->tx_pending;
5921 err = mvpp2_setup_rxqs(port);
5923 /* Reallocate Rx queues with the original ring size */
5924 port->rx_ring_size = prev_rx_ring_size;
5925 ring->rx_pending = prev_rx_ring_size;
5926 err = mvpp2_setup_rxqs(port);
5930 err = mvpp2_setup_txqs(port);
5932 /* Reallocate Tx queues with the original ring size */
5933 port->tx_ring_size = prev_tx_ring_size;
5934 ring->tx_pending = prev_tx_ring_size;
5935 err = mvpp2_setup_txqs(port);
5937 goto err_clean_rxqs;
5940 mvpp2_start_dev(port);
5941 mvpp2_egress_enable(port);
5942 mvpp2_ingress_enable(port);
5947 mvpp2_cleanup_rxqs(port);
5949 netdev_err(dev, "failed to change ring parameters");
5955 static const struct net_device_ops mvpp2_netdev_ops = {
5956 .ndo_open = mvpp2_open,
5957 .ndo_stop = mvpp2_stop,
5958 .ndo_start_xmit = mvpp2_tx,
5959 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5960 .ndo_set_mac_address = mvpp2_set_mac_address,
5961 .ndo_change_mtu = mvpp2_change_mtu,
5962 .ndo_get_stats64 = mvpp2_get_stats64,
5963 .ndo_do_ioctl = mvpp2_ioctl,
5966 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5967 .nway_reset = phy_ethtool_nway_reset,
5968 .get_link = ethtool_op_get_link,
5969 .set_coalesce = mvpp2_ethtool_set_coalesce,
5970 .get_coalesce = mvpp2_ethtool_get_coalesce,
5971 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5972 .get_ringparam = mvpp2_ethtool_get_ringparam,
5973 .set_ringparam = mvpp2_ethtool_set_ringparam,
5974 .get_link_ksettings = phy_ethtool_get_link_ksettings,
5975 .set_link_ksettings = phy_ethtool_set_link_ksettings,
5978 /* Driver initialization */
5980 static void mvpp2_port_power_up(struct mvpp2_port *port)
5982 mvpp2_port_mii_set(port);
5983 mvpp2_port_periodic_xon_disable(port);
5984 mvpp2_port_fc_adv_enable(port);
5985 mvpp2_port_reset(port);
5988 /* Initialize port HW */
5989 static int mvpp2_port_init(struct mvpp2_port *port)
5991 struct device *dev = port->dev->dev.parent;
5992 struct mvpp2 *priv = port->priv;
5993 struct mvpp2_txq_pcpu *txq_pcpu;
5994 int queue, cpu, err;
5996 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
6000 mvpp2_egress_disable(port);
6001 mvpp2_port_disable(port);
6003 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
6008 /* Associate physical Tx queues to this port and initialize.
6009 * The mapping is predefined.
6010 */
6011 for (queue = 0; queue < txq_number; queue++) {
6012 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6013 struct mvpp2_tx_queue *txq;
6015 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
6018 goto err_free_percpu;
6021 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6024 goto err_free_percpu;
6027 txq->id = queue_phy_id;
6028 txq->log_id = queue;
6029 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6030 for_each_present_cpu(cpu) {
6031 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6032 txq_pcpu->cpu = cpu;
6035 port->txqs[queue] = txq;
6038 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
6042 goto err_free_percpu;
6045 /* Allocate and initialize Rx queue for this port */
6046 for (queue = 0; queue < rxq_number; queue++) {
6047 struct mvpp2_rx_queue *rxq;
6049 /* Map physical Rx queue to port's logical Rx queue */
6050 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6053 goto err_free_percpu;
6055 /* Map this Rx queue to a physical queue */
6056 rxq->id = port->first_rxq + queue;
6057 rxq->port = port->id;
6058 rxq->logic_rxq = queue;
6060 port->rxqs[queue] = rxq;
6063 /* Configure Rx queue group interrupt for this port */
6064 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
6066 /* Create Rx descriptor rings */
6067 for (queue = 0; queue < rxq_number; queue++) {
6068 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6070 rxq->size = port->rx_ring_size;
6071 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6072 rxq->time_coal = MVPP2_RX_COAL_USEC;
6075 mvpp2_ingress_disable(port);
6077 /* Port default configuration */
6078 mvpp2_defaults_set(port);
6080 /* Port's classifier configuration */
6081 mvpp2_cls_oversize_rxq_set(port);
6082 mvpp2_cls_port_config(port);
6084 /* Provide an initial Rx packet size */
6085 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6087 /* Initialize pools for swf */
6088 err = mvpp2_swf_bm_pool_init(port);
6090 goto err_free_percpu;
6095 for (queue = 0; queue < txq_number; queue++) {
6096 if (!port->txqs[queue])
6098 free_percpu(port->txqs[queue]->pcpu);
6103 /* Ports initialization */
6104 static int mvpp2_port_probe(struct platform_device *pdev,
6105 struct device_node *port_node,
6107 int *next_first_rxq)
6109 struct device_node *phy_node;
6110 struct mvpp2_port *port;
6111 struct mvpp2_port_pcpu *port_pcpu;
6112 struct net_device *dev;
6113 struct resource *res;
6114 const char *dt_mac_addr;
6115 const char *mac_from;
6116 char hw_mac_addr[ETH_ALEN];
6120 int priv_common_regs_num = 2;
6123 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6128 phy_node = of_parse_phandle(port_node, "phy", 0);
6130 dev_err(&pdev->dev, "missing phy\n");
6132 goto err_free_netdev;
6135 phy_mode = of_get_phy_mode(port_node);
6137 dev_err(&pdev->dev, "incorrect phy mode\n");
6139 goto err_free_netdev;
6142 if (of_property_read_u32(port_node, "port-id", &id)) {
6144 dev_err(&pdev->dev, "missing port-id value\n");
6145 goto err_free_netdev;
6148 dev->tx_queue_len = MVPP2_MAX_TXD;
6149 dev->watchdog_timeo = 5 * HZ;
6150 dev->netdev_ops = &mvpp2_netdev_ops;
6151 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6153 port = netdev_priv(dev);
6155 port->irq = irq_of_parse_and_map(port_node, 0);
6156 if (port->irq <= 0) {
6158 goto err_free_netdev;
6161 if (of_property_read_bool(port_node, "marvell,loopback"))
6162 port->flags |= MVPP2_F_LOOPBACK;
6166 port->first_rxq = *next_first_rxq;
6167 port->phy_node = phy_node;
6168 port->phy_interface = phy_mode;
6170 res = platform_get_resource(pdev, IORESOURCE_MEM,
6171 priv_common_regs_num + id);
6172 port->base = devm_ioremap_resource(&pdev->dev, res);
6173 if (IS_ERR(port->base)) {
6174 err = PTR_ERR(port->base);
6178 /* Alloc per-cpu stats */
6179 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6185 dt_mac_addr = of_get_mac_address(port_node);
6186 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6187 mac_from = "device tree";
6188 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6190 mvpp2_get_mac_address(port, hw_mac_addr);
6191 if (is_valid_ether_addr(hw_mac_addr)) {
6192 mac_from = "hardware";
6193 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6195 mac_from = "random";
6196 eth_hw_addr_random(dev);
6200 port->tx_ring_size = MVPP2_MAX_TXD;
6201 port->rx_ring_size = MVPP2_MAX_RXD;
6203 SET_NETDEV_DEV(dev, &pdev->dev);
6205 err = mvpp2_port_init(port);
6207 dev_err(&pdev->dev, "failed to init port %d\n", id);
6208 goto err_free_stats;
6210 mvpp2_port_power_up(port);
6212 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6215 goto err_free_txq_pcpu;
6218 for_each_present_cpu(cpu) {
6219 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6221 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6222 HRTIMER_MODE_REL_PINNED);
6223 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6224 port_pcpu->timer_scheduled = false;
6226 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6227 (unsigned long)dev);
6230 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6231 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6232 dev->features = features | NETIF_F_RXCSUM;
6233 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6234 dev->vlan_features |= features;
6236 /* MTU range: 68 - 9676 */
6237 dev->min_mtu = ETH_MIN_MTU;
6238 /* 9676 == 9700 - 20 and rounding to 8 */
6239 dev->max_mtu = 9676;
6241 err = register_netdev(dev);
6243 dev_err(&pdev->dev, "failed to register netdev\n");
6244 goto err_free_port_pcpu;
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	/* Increment the first Rx queue number to be used by the next port */
	*next_first_rxq += rxq_number;
	priv->port_list[id] = port;
	return 0;
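
	/* Error unwind: each label below releases what was acquired
	 * before the corresponding failure, in reverse order of
	 * acquisition.
	 */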
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}

/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;
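
	/* Program one window per DRAM chip select: WIN_BASE packs the
	 * 64 KiB-aligned base address with the MBus target ID and
	 * attributes, WIN_SIZE holds the window size, and win_enable
	 * accumulates the bits written to MVPP2_BASE_ADDR_ENABLE below.
	 */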
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}
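
	/* The remaining settings are global rather than per-port: the
	 * minimum RX packet size, and a write of 1 to
	 * MVPP2_RX_FIFO_INIT_REG that kicks off the FIFO
	 * initialization.
	 */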
	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Check hardware constraints */
	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	/* Disable HW PHY polling */
	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;
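
	/* One aggregated TXQ per present CPU: each CPU posts TX
	 * descriptors to its own aggregated queue, which keeps the hot
	 * TX path free of cross-CPU locking.
	 */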
	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);

	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	int port_count, first_rxq;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->lms_base))
		return PTR_ERR(priv->lms_base);

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_gop_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_gop_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(struct mvpp2_port *),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_gop_clk;
	}
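
	/* Every port claims a disjoint block of rxq_number RX queues:
	 * first_rxq starts at zero and mvpp2_port_probe() advances it
	 * for the next port.
	 */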
	/* Initialize ports */
	first_rxq = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
		if (err < 0)
			goto err_gop_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_phys);
	}

	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{ .compatible = "marvell,armada-375-pp2" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");