1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/dma-mapping.h>
5 #include <linux/etherdevice.h>
6 #include <linux/interrupt.h>
7 #ifdef CONFIG_RFS_ACCEL
8 #include <linux/cpu_rmap.h>
9 #endif
10 #include <linux/if_vlan.h>
11 #include <linux/irq.h>
12 #include <linux/ip.h>
13 #include <linux/ipv6.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/aer.h>
17 #include <linux/skbuff.h>
18 #include <linux/sctp.h>
19 #include <net/gre.h>
20 #include <net/ip6_checksum.h>
21 #include <net/pkt_cls.h>
22 #include <net/tcp.h>
23 #include <net/vxlan.h>
24 #include <net/geneve.h>
26 #include "hnae3.h"
27 #include "hns3_enet.h"
28 /* All hns3 tracepoints are defined by the include below, which
29 * must be included exactly once across the whole kernel with
30 * CREATE_TRACE_POINTS defined
31 */
32 #define CREATE_TRACE_POINTS
33 #include "hns3_trace.h"
35 #define hns3_set_field(origin, shift, val) ((origin) |= (val) << (shift))
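/* A TX buffer larger than HNS3_MAX_BD_SIZE has to be split across several
 * buffer descriptors (BDs); hns3_tx_bd_count() gives the number of BDs a
 * buffer of size S occupies.
 */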
36 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
38 #define hns3_rl_err(fmt, ...) \
39 do { \
40 if (net_ratelimit()) \
41 netdev_err(fmt, ##__VA_ARGS__); \
42 } while (0)
44 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
46 static const char hns3_driver_name[] = "hns3";
47 static const char hns3_driver_string[] =
48 "Hisilicon Ethernet Network Driver for Hip08 Family";
49 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
50 static struct hnae3_client client;
52 static int debug = -1;
53 module_param(debug, int, 0);
54 MODULE_PARM_DESC(debug, " Network interface message level setting");
56 static unsigned int tx_spare_buf_size;
57 module_param(tx_spare_buf_size, uint, 0400);
58 MODULE_PARM_DESC(tx_spare_buf_size, "Size used to allocate tx spare buffer");
60 static unsigned int tx_sgl = 1;
61 module_param(tx_sgl, uint, 0600);
62 MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
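/* The sg_table used by the dma_map_sg() TX path is carved out of the
 * per-ring tx spare buffer, so it is sized for the worst case of
 * HNS3_MAX_TSO_BD_NUM fragments and aligned to the DMA cache line size.
 */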
64 #define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \
65 sizeof(struct sg_table))
66 #define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM),\
67 dma_get_cache_alignment())
69 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
70 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
72 #define HNS3_INNER_VLAN_TAG 1
73 #define HNS3_OUTER_VLAN_TAG 2
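/* The hardware cannot transmit frames shorter than 33 bytes, so
 * hns3_nic_net_xmit() pads undersized frames up to HNS3_MIN_TX_LEN.
 */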
75 #define HNS3_MIN_TX_LEN 33U
77 /* hns3_pci_tbl - PCI Device ID Table
78 *
79 * Last entry must be all 0s
80 *
81 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
82 * Class, Class Mask, private data (not used) }
83 */
84 static const struct pci_device_id hns3_pci_tbl[] = {
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
88 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
89 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
90 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
91 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
92 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
93 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
94 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
95 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
96 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
97 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
98 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
99 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
100 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
101 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
102 /* required last entry */
103 {0, },
104 };
105 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
107 #define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \
108 { ptype, \
109 l, \
110 CHECKSUM_##s, \
111 HNS3_L3_TYPE_##t, \
112 1 }
114 #define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
115 { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 }
117 static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
118 HNS3_RX_PTYPE_UNUSED_ENTRY(0),
119 HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP),
120 HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP),
121 HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP),
122 HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL),
123 HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL),
124 HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL),
125 HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM),
126 HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL),
127 HNS3_RX_PTYPE_UNUSED_ENTRY(9),
128 HNS3_RX_PTYPE_UNUSED_ENTRY(10),
129 HNS3_RX_PTYPE_UNUSED_ENTRY(11),
130 HNS3_RX_PTYPE_UNUSED_ENTRY(12),
131 HNS3_RX_PTYPE_UNUSED_ENTRY(13),
132 HNS3_RX_PTYPE_UNUSED_ENTRY(14),
133 HNS3_RX_PTYPE_UNUSED_ENTRY(15),
134 HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL),
135 HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4),
136 HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4),
137 HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4),
138 HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4),
139 HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4),
140 HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4),
141 HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4),
142 HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4),
143 HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4),
144 HNS3_RX_PTYPE_UNUSED_ENTRY(26),
145 HNS3_RX_PTYPE_UNUSED_ENTRY(27),
146 HNS3_RX_PTYPE_UNUSED_ENTRY(28),
147 HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL),
148 HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL),
149 HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4),
150 HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4),
151 HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4),
152 HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4),
153 HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4),
154 HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4),
155 HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4),
156 HNS3_RX_PTYPE_UNUSED_ENTRY(38),
157 HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6),
158 HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6),
159 HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6),
160 HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6),
161 HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6),
162 HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6),
163 HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6),
164 HNS3_RX_PTYPE_UNUSED_ENTRY(46),
165 HNS3_RX_PTYPE_UNUSED_ENTRY(47),
166 HNS3_RX_PTYPE_UNUSED_ENTRY(48),
167 HNS3_RX_PTYPE_UNUSED_ENTRY(49),
168 HNS3_RX_PTYPE_UNUSED_ENTRY(50),
169 HNS3_RX_PTYPE_UNUSED_ENTRY(51),
170 HNS3_RX_PTYPE_UNUSED_ENTRY(52),
171 HNS3_RX_PTYPE_UNUSED_ENTRY(53),
172 HNS3_RX_PTYPE_UNUSED_ENTRY(54),
173 HNS3_RX_PTYPE_UNUSED_ENTRY(55),
174 HNS3_RX_PTYPE_UNUSED_ENTRY(56),
175 HNS3_RX_PTYPE_UNUSED_ENTRY(57),
176 HNS3_RX_PTYPE_UNUSED_ENTRY(58),
177 HNS3_RX_PTYPE_UNUSED_ENTRY(59),
178 HNS3_RX_PTYPE_UNUSED_ENTRY(60),
179 HNS3_RX_PTYPE_UNUSED_ENTRY(61),
180 HNS3_RX_PTYPE_UNUSED_ENTRY(62),
181 HNS3_RX_PTYPE_UNUSED_ENTRY(63),
182 HNS3_RX_PTYPE_UNUSED_ENTRY(64),
183 HNS3_RX_PTYPE_UNUSED_ENTRY(65),
184 HNS3_RX_PTYPE_UNUSED_ENTRY(66),
185 HNS3_RX_PTYPE_UNUSED_ENTRY(67),
186 HNS3_RX_PTYPE_UNUSED_ENTRY(68),
187 HNS3_RX_PTYPE_UNUSED_ENTRY(69),
188 HNS3_RX_PTYPE_UNUSED_ENTRY(70),
189 HNS3_RX_PTYPE_UNUSED_ENTRY(71),
190 HNS3_RX_PTYPE_UNUSED_ENTRY(72),
191 HNS3_RX_PTYPE_UNUSED_ENTRY(73),
192 HNS3_RX_PTYPE_UNUSED_ENTRY(74),
193 HNS3_RX_PTYPE_UNUSED_ENTRY(75),
194 HNS3_RX_PTYPE_UNUSED_ENTRY(76),
195 HNS3_RX_PTYPE_UNUSED_ENTRY(77),
196 HNS3_RX_PTYPE_UNUSED_ENTRY(78),
197 HNS3_RX_PTYPE_UNUSED_ENTRY(79),
198 HNS3_RX_PTYPE_UNUSED_ENTRY(80),
199 HNS3_RX_PTYPE_UNUSED_ENTRY(81),
200 HNS3_RX_PTYPE_UNUSED_ENTRY(82),
201 HNS3_RX_PTYPE_UNUSED_ENTRY(83),
202 HNS3_RX_PTYPE_UNUSED_ENTRY(84),
203 HNS3_RX_PTYPE_UNUSED_ENTRY(85),
204 HNS3_RX_PTYPE_UNUSED_ENTRY(86),
205 HNS3_RX_PTYPE_UNUSED_ENTRY(87),
206 HNS3_RX_PTYPE_UNUSED_ENTRY(88),
207 HNS3_RX_PTYPE_UNUSED_ENTRY(89),
208 HNS3_RX_PTYPE_UNUSED_ENTRY(90),
209 HNS3_RX_PTYPE_UNUSED_ENTRY(91),
210 HNS3_RX_PTYPE_UNUSED_ENTRY(92),
211 HNS3_RX_PTYPE_UNUSED_ENTRY(93),
212 HNS3_RX_PTYPE_UNUSED_ENTRY(94),
213 HNS3_RX_PTYPE_UNUSED_ENTRY(95),
214 HNS3_RX_PTYPE_UNUSED_ENTRY(96),
215 HNS3_RX_PTYPE_UNUSED_ENTRY(97),
216 HNS3_RX_PTYPE_UNUSED_ENTRY(98),
217 HNS3_RX_PTYPE_UNUSED_ENTRY(99),
218 HNS3_RX_PTYPE_UNUSED_ENTRY(100),
219 HNS3_RX_PTYPE_UNUSED_ENTRY(101),
220 HNS3_RX_PTYPE_UNUSED_ENTRY(102),
221 HNS3_RX_PTYPE_UNUSED_ENTRY(103),
222 HNS3_RX_PTYPE_UNUSED_ENTRY(104),
223 HNS3_RX_PTYPE_UNUSED_ENTRY(105),
224 HNS3_RX_PTYPE_UNUSED_ENTRY(106),
225 HNS3_RX_PTYPE_UNUSED_ENTRY(107),
226 HNS3_RX_PTYPE_UNUSED_ENTRY(108),
227 HNS3_RX_PTYPE_UNUSED_ENTRY(109),
228 HNS3_RX_PTYPE_UNUSED_ENTRY(110),
229 HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6),
230 HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6),
231 HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6),
232 HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6),
233 HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6),
234 HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6),
235 HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6),
236 HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6),
237 HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6),
238 HNS3_RX_PTYPE_UNUSED_ENTRY(120),
239 HNS3_RX_PTYPE_UNUSED_ENTRY(121),
240 HNS3_RX_PTYPE_UNUSED_ENTRY(122),
241 HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL),
242 HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL),
243 HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4),
244 HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4),
245 HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4),
246 HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4),
247 HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4),
248 HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4),
249 HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4),
250 HNS3_RX_PTYPE_UNUSED_ENTRY(132),
251 HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6),
252 HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6),
253 HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6),
254 HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6),
255 HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6),
256 HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6),
257 HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6),
258 HNS3_RX_PTYPE_UNUSED_ENTRY(140),
259 HNS3_RX_PTYPE_UNUSED_ENTRY(141),
260 HNS3_RX_PTYPE_UNUSED_ENTRY(142),
261 HNS3_RX_PTYPE_UNUSED_ENTRY(143),
262 HNS3_RX_PTYPE_UNUSED_ENTRY(144),
263 HNS3_RX_PTYPE_UNUSED_ENTRY(145),
264 HNS3_RX_PTYPE_UNUSED_ENTRY(146),
265 HNS3_RX_PTYPE_UNUSED_ENTRY(147),
266 HNS3_RX_PTYPE_UNUSED_ENTRY(148),
267 HNS3_RX_PTYPE_UNUSED_ENTRY(149),
268 HNS3_RX_PTYPE_UNUSED_ENTRY(150),
269 HNS3_RX_PTYPE_UNUSED_ENTRY(151),
270 HNS3_RX_PTYPE_UNUSED_ENTRY(152),
271 HNS3_RX_PTYPE_UNUSED_ENTRY(153),
272 HNS3_RX_PTYPE_UNUSED_ENTRY(154),
273 HNS3_RX_PTYPE_UNUSED_ENTRY(155),
274 HNS3_RX_PTYPE_UNUSED_ENTRY(156),
275 HNS3_RX_PTYPE_UNUSED_ENTRY(157),
276 HNS3_RX_PTYPE_UNUSED_ENTRY(158),
277 HNS3_RX_PTYPE_UNUSED_ENTRY(159),
278 HNS3_RX_PTYPE_UNUSED_ENTRY(160),
279 HNS3_RX_PTYPE_UNUSED_ENTRY(161),
280 HNS3_RX_PTYPE_UNUSED_ENTRY(162),
281 HNS3_RX_PTYPE_UNUSED_ENTRY(163),
282 HNS3_RX_PTYPE_UNUSED_ENTRY(164),
283 HNS3_RX_PTYPE_UNUSED_ENTRY(165),
284 HNS3_RX_PTYPE_UNUSED_ENTRY(166),
285 HNS3_RX_PTYPE_UNUSED_ENTRY(167),
286 HNS3_RX_PTYPE_UNUSED_ENTRY(168),
287 HNS3_RX_PTYPE_UNUSED_ENTRY(169),
288 HNS3_RX_PTYPE_UNUSED_ENTRY(170),
289 HNS3_RX_PTYPE_UNUSED_ENTRY(171),
290 HNS3_RX_PTYPE_UNUSED_ENTRY(172),
291 HNS3_RX_PTYPE_UNUSED_ENTRY(173),
292 HNS3_RX_PTYPE_UNUSED_ENTRY(174),
293 HNS3_RX_PTYPE_UNUSED_ENTRY(175),
294 HNS3_RX_PTYPE_UNUSED_ENTRY(176),
295 HNS3_RX_PTYPE_UNUSED_ENTRY(177),
296 HNS3_RX_PTYPE_UNUSED_ENTRY(178),
297 HNS3_RX_PTYPE_UNUSED_ENTRY(179),
298 HNS3_RX_PTYPE_UNUSED_ENTRY(180),
299 HNS3_RX_PTYPE_UNUSED_ENTRY(181),
300 HNS3_RX_PTYPE_UNUSED_ENTRY(182),
301 HNS3_RX_PTYPE_UNUSED_ENTRY(183),
302 HNS3_RX_PTYPE_UNUSED_ENTRY(184),
303 HNS3_RX_PTYPE_UNUSED_ENTRY(185),
304 HNS3_RX_PTYPE_UNUSED_ENTRY(186),
305 HNS3_RX_PTYPE_UNUSED_ENTRY(187),
306 HNS3_RX_PTYPE_UNUSED_ENTRY(188),
307 HNS3_RX_PTYPE_UNUSED_ENTRY(189),
308 HNS3_RX_PTYPE_UNUSED_ENTRY(190),
309 HNS3_RX_PTYPE_UNUSED_ENTRY(191),
310 HNS3_RX_PTYPE_UNUSED_ENTRY(192),
311 HNS3_RX_PTYPE_UNUSED_ENTRY(193),
312 HNS3_RX_PTYPE_UNUSED_ENTRY(194),
313 HNS3_RX_PTYPE_UNUSED_ENTRY(195),
314 HNS3_RX_PTYPE_UNUSED_ENTRY(196),
315 HNS3_RX_PTYPE_UNUSED_ENTRY(197),
316 HNS3_RX_PTYPE_UNUSED_ENTRY(198),
317 HNS3_RX_PTYPE_UNUSED_ENTRY(199),
318 HNS3_RX_PTYPE_UNUSED_ENTRY(200),
319 HNS3_RX_PTYPE_UNUSED_ENTRY(201),
320 HNS3_RX_PTYPE_UNUSED_ENTRY(202),
321 HNS3_RX_PTYPE_UNUSED_ENTRY(203),
322 HNS3_RX_PTYPE_UNUSED_ENTRY(204),
323 HNS3_RX_PTYPE_UNUSED_ENTRY(205),
324 HNS3_RX_PTYPE_UNUSED_ENTRY(206),
325 HNS3_RX_PTYPE_UNUSED_ENTRY(207),
326 HNS3_RX_PTYPE_UNUSED_ENTRY(208),
327 HNS3_RX_PTYPE_UNUSED_ENTRY(209),
328 HNS3_RX_PTYPE_UNUSED_ENTRY(210),
329 HNS3_RX_PTYPE_UNUSED_ENTRY(211),
330 HNS3_RX_PTYPE_UNUSED_ENTRY(212),
331 HNS3_RX_PTYPE_UNUSED_ENTRY(213),
332 HNS3_RX_PTYPE_UNUSED_ENTRY(214),
333 HNS3_RX_PTYPE_UNUSED_ENTRY(215),
334 HNS3_RX_PTYPE_UNUSED_ENTRY(216),
335 HNS3_RX_PTYPE_UNUSED_ENTRY(217),
336 HNS3_RX_PTYPE_UNUSED_ENTRY(218),
337 HNS3_RX_PTYPE_UNUSED_ENTRY(219),
338 HNS3_RX_PTYPE_UNUSED_ENTRY(220),
339 HNS3_RX_PTYPE_UNUSED_ENTRY(221),
340 HNS3_RX_PTYPE_UNUSED_ENTRY(222),
341 HNS3_RX_PTYPE_UNUSED_ENTRY(223),
342 HNS3_RX_PTYPE_UNUSED_ENTRY(224),
343 HNS3_RX_PTYPE_UNUSED_ENTRY(225),
344 HNS3_RX_PTYPE_UNUSED_ENTRY(226),
345 HNS3_RX_PTYPE_UNUSED_ENTRY(227),
346 HNS3_RX_PTYPE_UNUSED_ENTRY(228),
347 HNS3_RX_PTYPE_UNUSED_ENTRY(229),
348 HNS3_RX_PTYPE_UNUSED_ENTRY(230),
349 HNS3_RX_PTYPE_UNUSED_ENTRY(231),
350 HNS3_RX_PTYPE_UNUSED_ENTRY(232),
351 HNS3_RX_PTYPE_UNUSED_ENTRY(233),
352 HNS3_RX_PTYPE_UNUSED_ENTRY(234),
353 HNS3_RX_PTYPE_UNUSED_ENTRY(235),
354 HNS3_RX_PTYPE_UNUSED_ENTRY(236),
355 HNS3_RX_PTYPE_UNUSED_ENTRY(237),
356 HNS3_RX_PTYPE_UNUSED_ENTRY(238),
357 HNS3_RX_PTYPE_UNUSED_ENTRY(239),
358 HNS3_RX_PTYPE_UNUSED_ENTRY(240),
359 HNS3_RX_PTYPE_UNUSED_ENTRY(241),
360 HNS3_RX_PTYPE_UNUSED_ENTRY(242),
361 HNS3_RX_PTYPE_UNUSED_ENTRY(243),
362 HNS3_RX_PTYPE_UNUSED_ENTRY(244),
363 HNS3_RX_PTYPE_UNUSED_ENTRY(245),
364 HNS3_RX_PTYPE_UNUSED_ENTRY(246),
365 HNS3_RX_PTYPE_UNUSED_ENTRY(247),
366 HNS3_RX_PTYPE_UNUSED_ENTRY(248),
367 HNS3_RX_PTYPE_UNUSED_ENTRY(249),
368 HNS3_RX_PTYPE_UNUSED_ENTRY(250),
369 HNS3_RX_PTYPE_UNUSED_ENTRY(251),
370 HNS3_RX_PTYPE_UNUSED_ENTRY(252),
371 HNS3_RX_PTYPE_UNUSED_ENTRY(253),
372 HNS3_RX_PTYPE_UNUSED_ENTRY(254),
373 HNS3_RX_PTYPE_UNUSED_ENTRY(255),
374 };
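/* Hardware-reported packet types index into hns3_rx_ptype_tbl; any value
 * at or beyond the table size is treated as invalid.
 */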
376 #define HNS3_INVALID_PTYPE \
377 ARRAY_SIZE(hns3_rx_ptype_tbl)
379 static irqreturn_t hns3_irq_handle(int irq, void *vector)
380 {
381 struct hns3_enet_tqp_vector *tqp_vector = vector;
383 napi_schedule_irqoff(&tqp_vector->napi);
384 tqp_vector->event_cnt++;
386 return IRQ_HANDLED;
387 }
389 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
390 {
391 struct hns3_enet_tqp_vector *tqp_vectors;
392 unsigned int i;
394 for (i = 0; i < priv->vector_num; i++) {
395 tqp_vectors = &priv->tqp_vector[i];
397 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
398 continue;
400 /* clear the affinity mask */
401 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
403 /* release the irq resource */
404 free_irq(tqp_vectors->vector_irq, tqp_vectors);
405 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
406 }
407 }
409 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
410 {
411 struct hns3_enet_tqp_vector *tqp_vectors;
412 int txrx_int_idx = 0;
413 int rx_int_idx = 0;
414 int tx_int_idx = 0;
415 int ret;
416 int i;
418 for (i = 0; i < priv->vector_num; i++) {
419 tqp_vectors = &priv->tqp_vector[i];
421 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
422 continue;
424 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
425 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
426 "%s-%s-%s-%d", hns3_driver_name,
427 pci_name(priv->ae_handle->pdev),
428 "TxRx", txrx_int_idx++);
430 } else if (tqp_vectors->rx_group.ring) {
431 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
432 "%s-%s-%s-%d", hns3_driver_name,
433 pci_name(priv->ae_handle->pdev),
434 "Rx", rx_int_idx++);
435 } else if (tqp_vectors->tx_group.ring) {
436 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
437 "%s-%s-%s-%d", hns3_driver_name,
438 pci_name(priv->ae_handle->pdev),
439 "Tx", tx_int_idx++);
440 } else {
441 /* Skip this unused q_vector */
442 continue;
443 }
445 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
447 irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
448 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
449 tqp_vectors->name, tqp_vectors);
450 if (ret) {
451 netdev_err(priv->netdev, "request irq(%d) fail\n",
452 tqp_vectors->vector_irq);
453 hns3_nic_uninit_irq(priv);
454 return ret;
455 }
457 irq_set_affinity_hint(tqp_vectors->vector_irq,
458 &tqp_vectors->affinity_mask);
460 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
461 }
463 return 0;
464 }
466 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
469 writel(mask_en, tqp_vector->mask_addr);
472 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
474 napi_enable(&tqp_vector->napi);
475 enable_irq(tqp_vector->vector_irq);
478 hns3_mask_vector_irq(tqp_vector, 1);
481 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
484 hns3_mask_vector_irq(tqp_vector, 0);
486 disable_irq(tqp_vector->vector_irq);
487 napi_disable(&tqp_vector->napi);
488 cancel_work_sync(&tqp_vector->rx_group.dim.work);
489 cancel_work_sync(&tqp_vector->tx_group.dim.work);
492 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
495 u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
497 /* this defines the configuration for RL (Interrupt Rate Limiter).
498 * RL defines the rate of interrupts, i.e. the number of interrupts-per-second.
499 * GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing.
500 */
501 if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
502 !tqp_vector->rx_group.coal.adapt_enable)
503 /* According to the hardware, the range of rl_reg is
504 * 0-59 and the unit is 4.
505 */
506 rl_reg |= HNS3_INT_RL_ENABLE_MASK;
508 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
511 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
516 if (tqp_vector->rx_group.coal.unit_1us)
517 new_val = gl_value | HNS3_INT_GL_1US;
519 new_val = hns3_gl_usec_to_reg(gl_value);
521 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
524 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
529 if (tqp_vector->tx_group.coal.unit_1us)
530 new_val = gl_value | HNS3_INT_GL_1US;
532 new_val = hns3_gl_usec_to_reg(gl_value);
534 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
537 void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
540 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
543 void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
546 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
549 static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
550 struct hns3_nic_priv *priv)
552 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
553 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
554 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
555 struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
556 struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
558 tx_coal->adapt_enable = ptx_coal->adapt_enable;
559 rx_coal->adapt_enable = prx_coal->adapt_enable;
561 tx_coal->int_gl = ptx_coal->int_gl;
562 rx_coal->int_gl = prx_coal->int_gl;
564 rx_coal->flow_level = prx_coal->flow_level;
565 tx_coal->flow_level = ptx_coal->flow_level;
567 /* for device version V3 and above, GL can be configured in a 1us
568 * unit, so use the 1us unit.
569 */
570 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
571 tx_coal->unit_1us = 1;
572 rx_coal->unit_1us = 1;
575 if (ae_dev->dev_specs.int_ql_max) {
576 tx_coal->ql_enable = 1;
577 rx_coal->ql_enable = 1;
578 tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
579 rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
580 tx_coal->int_ql = ptx_coal->int_ql;
581 rx_coal->int_ql = prx_coal->int_ql;
586 hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
587 struct hns3_nic_priv *priv)
589 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
590 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
591 struct hnae3_handle *h = priv->ae_handle;
593 hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
594 hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
595 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
597 if (tx_coal->ql_enable)
598 hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);
600 if (rx_coal->ql_enable)
601 hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
604 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
606 struct hnae3_handle *h = hns3_get_handle(netdev);
607 struct hnae3_knic_private_info *kinfo = &h->kinfo;
608 struct hnae3_tc_info *tc_info = &kinfo->tc_info;
609 unsigned int queue_size = kinfo->num_tqps;
612 if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
613 netdev_reset_tc(netdev);
615 ret = netdev_set_num_tc(netdev, tc_info->num_tc);
618 "netdev_set_num_tc fail, ret=%d!\n", ret);
622 for (i = 0; i < HNAE3_MAX_TC; i++) {
623 if (!test_bit(i, &tc_info->tc_en))
626 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
627 tc_info->tqp_offset[i]);
631 ret = netif_set_real_num_tx_queues(netdev, queue_size);
634 "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
638 ret = netif_set_real_num_rx_queues(netdev, queue_size);
641 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
648 u16 hns3_get_max_available_channels(struct hnae3_handle *h)
650 u16 alloc_tqps, max_rss_size, rss_size;
652 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
653 rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;
655 return min_t(u16, rss_size, max_rss_size);
658 static void hns3_tqp_enable(struct hnae3_queue *tqp)
662 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
663 rcb_reg |= BIT(HNS3_RING_EN_B);
664 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
667 static void hns3_tqp_disable(struct hnae3_queue *tqp)
671 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
672 rcb_reg &= ~BIT(HNS3_RING_EN_B);
673 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
676 static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
678 #ifdef CONFIG_RFS_ACCEL
679 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
680 netdev->rx_cpu_rmap = NULL;
684 static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
686 #ifdef CONFIG_RFS_ACCEL
687 struct hns3_nic_priv *priv = netdev_priv(netdev);
688 struct hns3_enet_tqp_vector *tqp_vector;
691 if (!netdev->rx_cpu_rmap) {
692 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
693 if (!netdev->rx_cpu_rmap)
697 for (i = 0; i < priv->vector_num; i++) {
698 tqp_vector = &priv->tqp_vector[i];
699 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
700 tqp_vector->vector_irq);
702 hns3_free_rx_cpu_rmap(netdev);
710 static int hns3_nic_net_up(struct net_device *netdev)
712 struct hns3_nic_priv *priv = netdev_priv(netdev);
713 struct hnae3_handle *h = priv->ae_handle;
717 ret = hns3_nic_reset_all_ring(h);
721 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
723 /* enable the vectors */
724 for (i = 0; i < priv->vector_num; i++)
725 hns3_vector_enable(&priv->tqp_vector[i]);
728 for (j = 0; j < h->kinfo.num_tqps; j++)
729 hns3_tqp_enable(h->kinfo.tqp[j]);
731 /* start the ae_dev */
732 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
734 set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
736 hns3_tqp_disable(h->kinfo.tqp[j]);
738 for (j = i - 1; j >= 0; j--)
739 hns3_vector_disable(&priv->tqp_vector[j]);
745 static void hns3_config_xps(struct hns3_nic_priv *priv)
749 for (i = 0; i < priv->vector_num; i++) {
750 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
751 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
756 ret = netif_set_xps_queue(priv->netdev,
757 &tqp_vector->affinity_mask,
758 ring->tqp->tqp_index);
760 netdev_warn(priv->netdev,
761 "set xps queue failed: %d", ret);
768 static int hns3_nic_net_open(struct net_device *netdev)
770 struct hns3_nic_priv *priv = netdev_priv(netdev);
771 struct hnae3_handle *h = hns3_get_handle(netdev);
772 struct hnae3_knic_private_info *kinfo;
775 if (hns3_nic_resetting(netdev))
778 netif_carrier_off(netdev);
780 ret = hns3_nic_set_real_num_queue(netdev);
784 ret = hns3_nic_net_up(netdev);
786 netdev_err(netdev, "net up fail, ret=%d!\n", ret);
791 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
792 netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);
794 if (h->ae_algo->ops->set_timer_task)
795 h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
797 hns3_config_xps(priv);
799 netif_dbg(h, drv, netdev, "net open\n");
804 static void hns3_reset_tx_queue(struct hnae3_handle *h)
806 struct net_device *ndev = h->kinfo.netdev;
807 struct hns3_nic_priv *priv = netdev_priv(ndev);
808 struct netdev_queue *dev_queue;
811 for (i = 0; i < h->kinfo.num_tqps; i++) {
812 dev_queue = netdev_get_tx_queue(ndev,
813 priv->ring[i].queue_index);
814 netdev_tx_reset_queue(dev_queue);
818 static void hns3_nic_net_down(struct net_device *netdev)
820 struct hns3_nic_priv *priv = netdev_priv(netdev);
821 struct hnae3_handle *h = hns3_get_handle(netdev);
822 const struct hnae3_ae_ops *ops;
825 /* disable vectors */
826 for (i = 0; i < priv->vector_num; i++)
827 hns3_vector_disable(&priv->tqp_vector[i]);
830 for (i = 0; i < h->kinfo.num_tqps; i++)
831 hns3_tqp_disable(h->kinfo.tqp[i]);
834 ops = priv->ae_handle->ae_algo->ops;
836 ops->stop(priv->ae_handle);
838 /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
839 * during reset process, because driver may not be able
840 * to disable the ring through firmware when downing the netdev.
841 */
842 if (!hns3_nic_resetting(netdev))
843 hns3_clear_all_ring(priv->ae_handle, false);
845 hns3_reset_tx_queue(priv->ae_handle);
848 static int hns3_nic_net_stop(struct net_device *netdev)
850 struct hns3_nic_priv *priv = netdev_priv(netdev);
851 struct hnae3_handle *h = hns3_get_handle(netdev);
853 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
856 netif_dbg(h, drv, netdev, "net stop\n");
858 if (h->ae_algo->ops->set_timer_task)
859 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
861 netif_carrier_off(netdev);
862 netif_tx_disable(netdev);
864 hns3_nic_net_down(netdev);
869 static int hns3_nic_uc_sync(struct net_device *netdev,
870 const unsigned char *addr)
872 struct hnae3_handle *h = hns3_get_handle(netdev);
874 if (h->ae_algo->ops->add_uc_addr)
875 return h->ae_algo->ops->add_uc_addr(h, addr);
880 static int hns3_nic_uc_unsync(struct net_device *netdev,
881 const unsigned char *addr)
883 struct hnae3_handle *h = hns3_get_handle(netdev);
885 /* need ignore the request of removing device address, because
886 * we store the device address and other addresses of uc list
887 * in the function's mac filter list.
888 */
889 if (ether_addr_equal(addr, netdev->dev_addr))
892 if (h->ae_algo->ops->rm_uc_addr)
893 return h->ae_algo->ops->rm_uc_addr(h, addr);
898 static int hns3_nic_mc_sync(struct net_device *netdev,
899 const unsigned char *addr)
901 struct hnae3_handle *h = hns3_get_handle(netdev);
903 if (h->ae_algo->ops->add_mc_addr)
904 return h->ae_algo->ops->add_mc_addr(h, addr);
909 static int hns3_nic_mc_unsync(struct net_device *netdev,
910 const unsigned char *addr)
912 struct hnae3_handle *h = hns3_get_handle(netdev);
914 if (h->ae_algo->ops->rm_mc_addr)
915 return h->ae_algo->ops->rm_mc_addr(h, addr);
920 static u8 hns3_get_netdev_flags(struct net_device *netdev)
924 if (netdev->flags & IFF_PROMISC)
925 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
926 else if (netdev->flags & IFF_ALLMULTI)
927 flags = HNAE3_USER_MPE;
932 static void hns3_nic_set_rx_mode(struct net_device *netdev)
934 struct hnae3_handle *h = hns3_get_handle(netdev);
937 new_flags = hns3_get_netdev_flags(netdev);
939 __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
940 __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);
942 /* User mode Promisc mode enable and vlan filtering is disabled to
943 * let all packets in.
944 */
945 h->netdev_flags = new_flags;
946 hns3_request_update_promisc_mode(h);
949 void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
950 {
951 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
953 if (ops->request_update_promisc_mode)
954 ops->request_update_promisc_mode(handle);
955 }
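/* The tx spare is a per-ring, DMA-mapped circular buffer. It is used
 * either to bounce small skbs (the data is copied into it and sent from
 * there) or to hold the sg_table for the dma_map_sg() based TX path.
 * next_to_use, next_to_clean and last_to_clean track the producer and
 * consumer positions within the buffer.
 */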
957 static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
958 {
959 struct hns3_tx_spare *tx_spare = ring->tx_spare;
960 u32 ntc, ntu;
962 /* This smp_load_acquire() pairs with smp_store_release() in
963 * hns3_tx_spare_update() called in tx desc cleaning process.
964 */
965 ntc = smp_load_acquire(&tx_spare->last_to_clean);
966 ntu = tx_spare->next_to_use;
968 if (ntc > ntu)
969 return ntc - ntu - 1;
971 /* The free tx buffer is divided into two parts, so pick the
972 * longer one.
973 */
974 return (ntc > (tx_spare->len - ntu) ? ntc :
975 (tx_spare->len - ntu)) - 1;
976 }
978 static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
979 {
980 struct hns3_tx_spare *tx_spare = ring->tx_spare;
982 if (!tx_spare ||
983 tx_spare->last_to_clean == tx_spare->next_to_clean)
984 return;
986 /* This smp_store_release() pairs with smp_load_acquire() in
987 * hns3_tx_spare_space() called in xmit process.
988 */
989 smp_store_release(&tx_spare->last_to_clean,
990 tx_spare->next_to_clean);
991 }
993 static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
994 struct sk_buff *skb,
995 u32 space)
996 {
997 u32 len = skb->len <= ring->tx_copybreak ? skb->len :
998 skb_headlen(skb);
1000 if (len > ring->tx_copybreak)
1001 return false;
1003 if (ALIGN(len, dma_get_cache_alignment()) > space) {
1004 u64_stats_update_begin(&ring->syncp);
1005 ring->stats.tx_spare_full++;
1006 u64_stats_update_end(&ring->syncp);
1007 return false;
1008 }
1010 return true;
1011 }
1013 static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
1014 struct sk_buff *skb,
1015 u32 space)
1016 {
1017 if (skb->len <= ring->tx_copybreak || !tx_sgl ||
1018 (!skb_has_frag_list(skb) &&
1019 skb_shinfo(skb)->nr_frags < tx_sgl))
1020 return false;
1022 if (space < HNS3_MAX_SGL_SIZE) {
1023 u64_stats_update_begin(&ring->syncp);
1024 ring->stats.tx_spare_full++;
1025 u64_stats_update_end(&ring->syncp);
1026 return false;
1027 }
1029 return true;
1030 }
1032 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
1034 struct hns3_tx_spare *tx_spare;
1040 alloc_size = tx_spare_buf_size ? tx_spare_buf_size :
1041 ring->tqp->handle->kinfo.tx_spare_buf_size;
1045 order = get_order(alloc_size);
1046 tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
1049 /* The driver still works without the tx spare buffer */
1050 dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
1054 page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
1057 dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
1058 devm_kfree(ring_to_dev(ring), tx_spare);
1062 dma = dma_map_page(ring_to_dev(ring), page, 0,
1063 PAGE_SIZE << order, DMA_TO_DEVICE);
1064 if (dma_mapping_error(ring_to_dev(ring), dma)) {
1065 dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
1067 devm_kfree(ring_to_dev(ring), tx_spare);
1071 tx_spare->dma = dma;
1072 tx_spare->buf = page_address(page);
1073 tx_spare->len = PAGE_SIZE << order;
1074 ring->tx_spare = tx_spare;
1077 /* Use hns3_tx_spare_space() to make sure there is enough buffer
1078 * before calling the function below to allocate the tx buffer.
1079 */
1080 static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring,
1081 unsigned int size, dma_addr_t *dma,
1084 struct hns3_tx_spare *tx_spare = ring->tx_spare;
1085 u32 ntu = tx_spare->next_to_use;
1087 size = ALIGN(size, dma_get_cache_alignment());
1090 /* Tx spare buffer wraps back here because the end of
1091 * the freed tx buffer is not enough.
1092 */
1093 if (ntu + size > tx_spare->len) {
1094 *cb_len += (tx_spare->len - ntu);
1098 tx_spare->next_to_use = ntu + size;
1099 if (tx_spare->next_to_use == tx_spare->len)
1100 tx_spare->next_to_use = 0;
1102 *dma = tx_spare->dma + ntu;
1104 return tx_spare->buf + ntu;
1107 static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len)
1109 struct hns3_tx_spare *tx_spare = ring->tx_spare;
1111 if (len > tx_spare->next_to_use) {
1112 len -= tx_spare->next_to_use;
1113 tx_spare->next_to_use = tx_spare->len - len;
1115 tx_spare->next_to_use -= len;
1119 static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring,
1120 struct hns3_desc_cb *cb)
1122 struct hns3_tx_spare *tx_spare = ring->tx_spare;
1123 u32 ntc = tx_spare->next_to_clean;
1124 u32 len = cb->length;
1126 tx_spare->next_to_clean += len;
1128 if (tx_spare->next_to_clean >= tx_spare->len) {
1129 tx_spare->next_to_clean -= tx_spare->len;
1131 if (tx_spare->next_to_clean) {
1133 len = tx_spare->next_to_clean;
1137 /* This tx spare buffer is only really reclaimed after calling
1138 * hns3_tx_spare_update(), so it is still safe to use the info in
1139 * the tx buffer to do the dma sync or sg unmapping after
1140 * tx_spare->next_to_clean is moved forward.
1141 */
1142 if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) {
1143 dma_addr_t dma = tx_spare->dma + ntc;
1145 dma_sync_single_for_cpu(ring_to_dev(ring), dma, len,
1148 struct sg_table *sgt = tx_spare->buf + ntc;
1150 dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
1155 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
1156 u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
1158 u32 l4_offset, hdr_len;
1159 union l3_hdr_info l3;
1160 union l4_hdr_info l4;
1164 if (!skb_is_gso(skb))
1167 ret = skb_cow_head(skb, 0);
1168 if (unlikely(ret < 0))
1171 l3.hdr = skb_network_header(skb);
1172 l4.hdr = skb_transport_header(skb);
1174 /* Software should clear the IPv4's checksum field when tso is
1175 * enabled.
1176 */
1177 if (l3.v4->version == 4)
1178 l3.v4->check = 0;
1181 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1183 SKB_GSO_UDP_TUNNEL |
1184 SKB_GSO_UDP_TUNNEL_CSUM)) {
1185 /* reset l3&l4 pointers from outer to inner headers */
1186 l3.hdr = skb_inner_network_header(skb);
1187 l4.hdr = skb_inner_transport_header(skb);
1189 /* Software should clear the IPv4's checksum field when
1190 * tso is enabled.
1191 */
1192 if (l3.v4->version == 4)
1193 l3.v4->check = 0;
1196 /* normal or tunnel packet */
1197 l4_offset = l4.hdr - skb->data;
1199 /* remove payload length from inner pseudo checksum when tso */
1200 l4_paylen = skb->len - l4_offset;
1202 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1203 hdr_len = sizeof(*l4.udp) + l4_offset;
1204 csum_replace_by_diff(&l4.udp->check,
1205 (__force __wsum)htonl(l4_paylen));
1207 hdr_len = (l4.tcp->doff << 2) + l4_offset;
1208 csum_replace_by_diff(&l4.tcp->check,
1209 (__force __wsum)htonl(l4_paylen));
1212 *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;
1214 /* find the txbd field values */
1215 *paylen_fdop_ol4cs = skb->len - hdr_len;
1216 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
1218 /* offload outer UDP header checksum */
1219 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1220 hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);
1222 /* get MSS for TSO */
1223 *mss = skb_shinfo(skb)->gso_size;
1225 trace_hns3_tso(skb);
1230 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
1233 union l3_hdr_info l3;
1234 unsigned char *l4_hdr;
1235 unsigned char *exthdr;
1239 /* find outer header point */
1240 l3.hdr = skb_network_header(skb);
1241 l4_hdr = skb_transport_header(skb);
1243 if (skb->protocol == htons(ETH_P_IPV6)) {
1244 exthdr = l3.hdr + sizeof(*l3.v6);
1245 l4_proto_tmp = l3.v6->nexthdr;
1246 if (l4_hdr != exthdr)
1247 ipv6_skip_exthdr(skb, exthdr - skb->data,
1248 &l4_proto_tmp, &frag_off);
1249 } else if (skb->protocol == htons(ETH_P_IP)) {
1250 l4_proto_tmp = l3.v4->protocol;
1255 *ol4_proto = l4_proto_tmp;
1258 if (!skb->encapsulation) {
1263 /* find inner header point */
1264 l3.hdr = skb_inner_network_header(skb);
1265 l4_hdr = skb_inner_transport_header(skb);
1267 if (l3.v6->version == 6) {
1268 exthdr = l3.hdr + sizeof(*l3.v6);
1269 l4_proto_tmp = l3.v6->nexthdr;
1270 if (l4_hdr != exthdr)
1271 ipv6_skip_exthdr(skb, exthdr - skb->data,
1272 &l4_proto_tmp, &frag_off);
1273 } else if (l3.v4->version == 4) {
1274 l4_proto_tmp = l3.v4->protocol;
1277 *il4_proto = l4_proto_tmp;
1282 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
1283 * and it is udp packet, which has a dest port as the IANA assigned.
1284 * the hardware is expected to do the checksum offload, but the
1285 * hardware will not do the checksum offload when udp dest port is
1286 * 4789, 4790 or 6081.
1287 */
1288 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
1290 struct hns3_nic_priv *priv = netdev_priv(skb->dev);
1291 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
1292 union l4_hdr_info l4;
1294 /* device version above V3(include V3), the hardware can
1295 * do this checksum offload.
1296 */
1297 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
1298 return false;
1300 l4.hdr = skb_transport_header(skb);
1302 if (!(!skb->encapsulation &&
1303 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
1304 l4.udp->dest == htons(GENEVE_UDP_PORT) ||
1305 l4.udp->dest == htons(4790))))
1306 return false;
1308 return true;
1309 }
1311 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
1312 u32 *ol_type_vlan_len_msec)
1314 u32 l2_len, l3_len, l4_len;
1315 unsigned char *il2_hdr;
1316 union l3_hdr_info l3;
1317 union l4_hdr_info l4;
1319 l3.hdr = skb_network_header(skb);
1320 l4.hdr = skb_transport_header(skb);
1322 /* compute OL2 header size, defined in 2 Bytes */
1323 l2_len = l3.hdr - skb->data;
1324 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
1326 /* compute OL3 header size, defined in 4 Bytes */
1327 l3_len = l4.hdr - l3.hdr;
1328 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
1330 il2_hdr = skb_inner_mac_header(skb);
1331 /* compute OL4 header size, defined in 4 Bytes */
1332 l4_len = il2_hdr - l4.hdr;
1333 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
1335 /* define outer network header type */
1336 if (skb->protocol == htons(ETH_P_IP)) {
1337 if (skb_is_gso(skb))
1338 hns3_set_field(*ol_type_vlan_len_msec,
1340 HNS3_OL3T_IPV4_CSUM);
1342 hns3_set_field(*ol_type_vlan_len_msec,
1344 HNS3_OL3T_IPV4_NO_CSUM);
1345 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1346 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
1350 if (ol4_proto == IPPROTO_UDP)
1351 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
1352 HNS3_TUN_MAC_IN_UDP);
1353 else if (ol4_proto == IPPROTO_GRE)
1354 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
1358 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
1359 u8 il4_proto, u32 *type_cs_vlan_tso,
1360 u32 *ol_type_vlan_len_msec)
1362 unsigned char *l2_hdr = skb->data;
1363 u32 l4_proto = ol4_proto;
1364 union l4_hdr_info l4;
1365 union l3_hdr_info l3;
1368 l4.hdr = skb_transport_header(skb);
1369 l3.hdr = skb_network_header(skb);
1371 /* handle encapsulation skb */
1372 if (skb->encapsulation) {
1373 /* If this is not a UDP/GRE encapsulation skb */
1374 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
1375 /* drop the skb tunnel packet if hardware doesn't support it,
1376 * because hardware can't calculate csum when TSO.
1377 */
1378 if (skb_is_gso(skb))
1379 return -EDOM;
1381 /* the stack computes the IP header already,
1382 * driver calculates the l4 checksum when not TSO.
1383 */
1384 return skb_checksum_help(skb);
1385 }
1387 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
1389 /* switch to inner header */
1390 l2_hdr = skb_inner_mac_header(skb);
1391 l3.hdr = skb_inner_network_header(skb);
1392 l4.hdr = skb_inner_transport_header(skb);
1393 l4_proto = il4_proto;
1396 if (l3.v4->version == 4) {
1397 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
1400 /* the stack computes the IP header already, the only time we
1401 * need the hardware to recompute it is in the case of TSO.
1402 */
1403 if (skb_is_gso(skb))
1404 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
1405 } else if (l3.v6->version == 6) {
1406 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
1410 /* compute inner(/normal) L2 header size, defined in 2 Bytes */
1411 l2_len = l3.hdr - l2_hdr;
1412 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
1414 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
1415 l3_len = l4.hdr - l3.hdr;
1416 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
1418 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
1419 switch (l4_proto) {
1420 case IPPROTO_TCP:
1421 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
1422 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
1423 HNS3_L4T_TCP);
1424 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
1425 l4.tcp->doff);
1426 break;
1427 case IPPROTO_UDP:
1428 if (hns3_tunnel_csum_bug(skb))
1429 return skb_checksum_help(skb);
1431 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
1432 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
1433 HNS3_L4T_UDP);
1434 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
1435 (sizeof(struct udphdr) >> 2));
1436 break;
1437 case IPPROTO_SCTP:
1438 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
1439 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
1440 HNS3_L4T_SCTP);
1441 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
1442 (sizeof(struct sctphdr) >> 2));
1443 break;
1444 default:
1445 /* drop the skb tunnel packet if hardware doesn't support it,
1446 * because hardware can't calculate csum when TSO.
1447 */
1448 if (skb_is_gso(skb))
1449 return -EDOM;
1451 /* the stack computes the IP header already,
1452 * driver calculates the l4 checksum when not TSO.
1453 */
1454 return skb_checksum_help(skb);
1455 }
1457 return 0;
1458 }
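/* hns3_handle_vtags() returns HNS3_INNER_VLAN_TAG or HNS3_OUTER_VLAN_TAG
 * when a VLAN tag must be placed in the corresponding field of the TX
 * descriptor, 0 when no tag handling is needed, and a negative errno on
 * failure.
 */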
1460 static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
1461 struct sk_buff *skb)
1463 struct hnae3_handle *handle = tx_ring->tqp->handle;
1464 struct hnae3_ae_dev *ae_dev;
1465 struct vlan_ethhdr *vhdr;
1468 if (!(skb->protocol == htons(ETH_P_8021Q) ||
1469 skb_vlan_tag_present(skb)))
1470 return 0;
1472 /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert
1473 * VLAN enabled, only one VLAN header is allowed in skb, otherwise it
1474 * will cause RAS error.
1476 ae_dev = pci_get_drvdata(handle->pdev);
1477 if (unlikely(skb_vlan_tagged_multi(skb) &&
1478 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
1479 handle->port_base_vlan_state ==
1480 HNAE3_PORT_BASE_VLAN_ENABLE))
1481 return -EINVAL;
1483 if (skb->protocol == htons(ETH_P_8021Q) &&
1484 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1485 /* When HW VLAN acceleration is turned off, and the stack
1486 * sets the protocol to 802.1q, the driver just needs to
1487 * set the protocol to the encapsulated ethertype.
1488 */
1489 skb->protocol = vlan_get_protocol(skb);
1490 return 0;
1491 }
1493 if (skb_vlan_tag_present(skb)) {
1494 /* Based on hw strategy, use out_vtag in two layer tag case,
1495 * and use inner_vtag in one tag case.
1496 */
1497 if (skb->protocol == htons(ETH_P_8021Q) &&
1498 handle->port_base_vlan_state ==
1499 HNAE3_PORT_BASE_VLAN_DISABLE)
1500 rc = HNS3_OUTER_VLAN_TAG;
1501 else
1502 rc = HNS3_INNER_VLAN_TAG;
1504 skb->protocol = vlan_get_protocol(skb);
1505 return rc;
1506 }
1508 rc = skb_cow_head(skb, 0);
1509 if (unlikely(rc < 0))
1510 return rc;
1512 vhdr = (struct vlan_ethhdr *)skb->data;
1513 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
1514 & VLAN_PRIO_MASK);
1516 skb->protocol = vlan_get_protocol(skb);
1517 return 0;
1518 }
1520 /* check if the hardware is capable of checksum offloading */
1521 static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
1523 struct hns3_nic_priv *priv = netdev_priv(skb->dev);
1525 /* Kindly note, due to backward compatibility of the TX descriptor,
1526 * HW checksum of the non-IP packets and GSO packets is handled at
1527 * a different place in the following code.
1528 */
1529 if (skb_csum_is_sctp(skb) || skb_is_gso(skb) ||
1530 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
1536 static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
1537 struct sk_buff *skb, struct hns3_desc *desc,
1538 struct hns3_desc_cb *desc_cb)
1540 u32 ol_type_vlan_len_msec = 0;
1541 u32 paylen_ol4cs = skb->len;
1542 u32 type_cs_vlan_tso = 0;
1543 u16 mss_hw_csum = 0;
1548 ret = hns3_handle_vtags(ring, skb);
1549 if (unlikely(ret < 0)) {
1550 u64_stats_update_begin(&ring->syncp);
1551 ring->stats.tx_vlan_err++;
1552 u64_stats_update_end(&ring->syncp);
1554 } else if (ret == HNS3_INNER_VLAN_TAG) {
1555 inner_vtag = skb_vlan_tag_get(skb);
1556 inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
1558 hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
1559 } else if (ret == HNS3_OUTER_VLAN_TAG) {
1560 out_vtag = skb_vlan_tag_get(skb);
1561 out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
1563 hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
1567 desc_cb->send_bytes = skb->len;
1569 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1570 u8 ol4_proto, il4_proto;
1572 if (hns3_check_hw_tx_csum(skb)) {
1573 /* set checksum start and offset, defined in 2 Bytes */
1574 hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
1575 skb_checksum_start_offset(skb) >> 1);
1576 hns3_set_field(ol_type_vlan_len_msec,
1577 HNS3_TXD_CSUM_OFFSET_S,
1578 skb->csum_offset >> 1);
1579 mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
1580 goto out_hw_tx_csum;
1583 skb_reset_mac_len(skb);
1585 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1586 if (unlikely(ret < 0)) {
1587 u64_stats_update_begin(&ring->syncp);
1588 ring->stats.tx_l4_proto_err++;
1589 u64_stats_update_end(&ring->syncp);
1593 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
1595 &ol_type_vlan_len_msec);
1596 if (unlikely(ret < 0)) {
1597 u64_stats_update_begin(&ring->syncp);
1598 ring->stats.tx_l2l3l4_err++;
1599 u64_stats_update_end(&ring->syncp);
1603 ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
1604 &type_cs_vlan_tso, &desc_cb->send_bytes);
1605 if (unlikely(ret < 0)) {
1606 u64_stats_update_begin(&ring->syncp);
1607 ring->stats.tx_tso_err++;
1608 u64_stats_update_end(&ring->syncp);
1615 desc->tx.ol_type_vlan_len_msec =
1616 cpu_to_le32(ol_type_vlan_len_msec);
1617 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
1618 desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
1619 desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
1620 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
1621 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
1626 static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
1629 #define HNS3_LIKELY_BD_NUM 1
1631 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1632 unsigned int frag_buf_num;
1635 if (likely(size <= HNS3_MAX_BD_SIZE)) {
1636 desc->addr = cpu_to_le64(dma);
1637 desc->tx.send_size = cpu_to_le16(size);
1638 desc->tx.bdtp_fe_sc_vld_ra_ri =
1639 cpu_to_le16(BIT(HNS3_TXD_VLD_B));
1641 trace_hns3_tx_desc(ring, ring->next_to_use);
1642 ring_ptr_move_fw(ring, next_to_use);
1643 return HNS3_LIKELY_BD_NUM;
1646 frag_buf_num = hns3_tx_bd_count(size);
1647 sizeoflast = size % HNS3_MAX_BD_SIZE;
1648 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1650 /* When frag size is bigger than hardware limit, split this frag */
1651 for (k = 0; k < frag_buf_num; k++) {
1652 /* now, fill the descriptor */
1653 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1654 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1655 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1656 desc->tx.bdtp_fe_sc_vld_ra_ri =
1657 cpu_to_le16(BIT(HNS3_TXD_VLD_B));
1659 trace_hns3_tx_desc(ring, ring->next_to_use);
1660 /* move ring pointer to next */
1661 ring_ptr_move_fw(ring, next_to_use);
1663 desc = &ring->desc[ring->next_to_use];
1666 return frag_buf_num;
1669 static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
1672 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
1673 struct device *dev = ring_to_dev(ring);
1677 if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) {
1678 struct sk_buff *skb = (struct sk_buff *)priv;
1680 size = skb_headlen(skb);
1684 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1685 } else if (type & DESC_TYPE_BOUNCE_HEAD) {
1686 /* Head data has been filled in hns3_handle_tx_bounce(),
1687 * just return 0 here.
1688 */
1689 return 0;
1690 } else {
1691 skb_frag_t *frag = (skb_frag_t *)priv;
1693 size = skb_frag_size(frag);
1697 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1700 if (unlikely(dma_mapping_error(dev, dma))) {
1701 u64_stats_update_begin(&ring->syncp);
1702 ring->stats.sw_err_cnt++;
1703 u64_stats_update_end(&ring->syncp);
1707 desc_cb->priv = priv;
1708 desc_cb->length = size;
1710 desc_cb->type = type;
1712 return hns3_fill_desc(ring, dma, size);
1715 static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
1716 unsigned int bd_num)
1721 size = skb_headlen(skb);
1722 while (size > HNS3_MAX_BD_SIZE) {
1723 bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
1724 size -= HNS3_MAX_BD_SIZE;
1726 if (bd_num > HNS3_MAX_TSO_BD_NUM)
1731 bd_size[bd_num++] = size;
1732 if (bd_num > HNS3_MAX_TSO_BD_NUM)
1736 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1737 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1738 size = skb_frag_size(frag);
1742 while (size > HNS3_MAX_BD_SIZE) {
1743 bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
1744 size -= HNS3_MAX_BD_SIZE;
1746 if (bd_num > HNS3_MAX_TSO_BD_NUM)
1750 bd_size[bd_num++] = size;
1751 if (bd_num > HNS3_MAX_TSO_BD_NUM)
1758 static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
1759 u8 max_non_tso_bd_num, unsigned int bd_num,
1760 unsigned int recursion_level)
1762 #define HNS3_MAX_RECURSION_LEVEL 24
1764 struct sk_buff *frag_skb;
1766 /* If the total len is within the max bd limit */
1767 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
1768 !skb_has_frag_list(skb) &&
1769 skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
1770 return skb_shinfo(skb)->nr_frags + 1U;
1772 if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
1775 bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
1776 if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
1779 skb_walk_frags(skb, frag_skb) {
1780 bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
1781 bd_num, recursion_level + 1);
1782 if (bd_num > HNS3_MAX_TSO_BD_NUM)
1789 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
1791 if (!skb->encapsulation)
1792 return skb_transport_offset(skb) + tcp_hdrlen(skb);
1794 return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
1797 /* HW need every continuous max_non_tso_bd_num buffer data to be larger
1798 * than MSS, we simplify it by ensuring skb_headlen + the first continuous
1799 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss,
1800 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger
1801 * than MSS except the last max_non_tso_bd_num - 1 frags.
1802 */
1803 static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
1804 unsigned int bd_num, u8 max_non_tso_bd_num)
1806 unsigned int tot_len = 0;
1809 for (i = 0; i < max_non_tso_bd_num - 1U; i++)
1810 tot_len += bd_size[i];
1812 /* ensure the first max_non_tso_bd_num frags is greater than
1815 if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
1816 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
1819 /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater
1820 * than mss except the last one.
1822 for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
1823 tot_len -= bd_size[i];
1824 tot_len += bd_size[i + max_non_tso_bd_num - 1U];
1826 if (tot_len < skb_shinfo(skb)->gso_size)
1833 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
1837 for (i = 0; i < MAX_SKB_FRAGS; i++)
1838 size[i] = skb_frag_size(&shinfo->frags[i]);
1841 static int hns3_skb_linearize(struct hns3_enet_ring *ring,
1842 struct sk_buff *skb,
1843 u8 max_non_tso_bd_num,
1844 unsigned int bd_num)
1846 /* 'bd_num == UINT_MAX' means the skb' fraglist has a
1847 * recursion level of over HNS3_MAX_RECURSION_LEVEL.
1849 if (bd_num == UINT_MAX) {
1850 u64_stats_update_begin(&ring->syncp);
1851 ring->stats.over_max_recursion++;
1852 u64_stats_update_end(&ring->syncp);
1856 /* The skb->len has exceeded the hw limitation, linearization
1859 if (skb->len > HNS3_MAX_TSO_SIZE ||
1860 (!skb_is_gso(skb) && skb->len >
1861 HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
1862 u64_stats_update_begin(&ring->syncp);
1863 ring->stats.hw_limitation++;
1864 u64_stats_update_end(&ring->syncp);
1868 if (__skb_linearize(skb)) {
1869 u64_stats_update_begin(&ring->syncp);
1870 ring->stats.sw_err_cnt++;
1871 u64_stats_update_end(&ring->syncp);
1878 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
1879 struct net_device *netdev,
1880 struct sk_buff *skb)
1882 struct hns3_nic_priv *priv = netdev_priv(netdev);
1883 u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
1884 unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
1885 unsigned int bd_num;
1887 bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
1888 if (unlikely(bd_num > max_non_tso_bd_num)) {
1889 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
1890 !hns3_skb_need_linearized(skb, bd_size, bd_num,
1891 max_non_tso_bd_num)) {
1892 trace_hns3_over_max_bd(skb);
1896 if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
1900 bd_num = hns3_tx_bd_count(skb->len);
1902 u64_stats_update_begin(&ring->syncp);
1903 ring->stats.tx_copy++;
1904 u64_stats_update_end(&ring->syncp);
1908 if (likely(ring_space(ring) >= bd_num))
1911 netif_stop_subqueue(netdev, ring->queue_index);
1912 smp_mb(); /* Memory barrier before checking ring_space */
1914 /* Start queue in case hns3_clean_tx_ring has just made room
1915 * available and has not seen the queue stopped state performed
1916 * by netif_stop_subqueue above.
1918 if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
1919 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
1920 netif_start_subqueue(netdev, ring->queue_index);
1924 u64_stats_update_begin(&ring->syncp);
1925 ring->stats.tx_busy++;
1926 u64_stats_update_end(&ring->syncp);
1931 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1933 struct device *dev = ring_to_dev(ring);
1936 for (i = 0; i < ring->desc_num; i++) {
1937 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1938 struct hns3_desc_cb *desc_cb;
1940 memset(desc, 0, sizeof(*desc));
1942 /* check if this is where we started */
1943 if (ring->next_to_use == next_to_use_orig)
1947 ring_ptr_move_bw(ring, next_to_use);
1949 desc_cb = &ring->desc_cb[ring->next_to_use];
1954 /* unmap the descriptor dma address */
1955 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
1956 dma_unmap_single(dev, desc_cb->dma, desc_cb->length,
1958 else if (desc_cb->type &
1959 (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL))
1960 hns3_tx_spare_rollback(ring, desc_cb->length);
1961 else if (desc_cb->length)
1962 dma_unmap_page(dev, desc_cb->dma, desc_cb->length,
1965 desc_cb->length = 0;
1967 desc_cb->type = DESC_TYPE_UNKNOWN;
1971 static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
1972 struct sk_buff *skb, unsigned int type)
1974 struct sk_buff *frag_skb;
1975 int i, ret, bd_num = 0;
1977 ret = hns3_map_and_fill_desc(ring, skb, type);
1978 if (unlikely(ret < 0))
1983 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1984 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1986 ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE);
1987 if (unlikely(ret < 0))
1993 skb_walk_frags(skb, frag_skb) {
1994 ret = hns3_fill_skb_to_desc(ring, frag_skb,
1995 DESC_TYPE_FRAGLIST_SKB);
1996 if (unlikely(ret < 0))
2005 static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
2008 ring->pending_buf += num;
2011 u64_stats_update_begin(&ring->syncp);
2012 ring->stats.tx_more++;
2013 u64_stats_update_end(&ring->syncp);
2017 if (!ring->pending_buf)
2020 writel(ring->pending_buf,
2021 ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
2022 ring->pending_buf = 0;
2023 WRITE_ONCE(ring->last_to_use, ring->next_to_use);
2026 static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
2027 struct hns3_desc *desc)
2029 struct hnae3_handle *h = hns3_get_handle(netdev);
2031 if (!(h->ae_algo->ops->set_tx_hwts_info &&
2032 h->ae_algo->ops->set_tx_hwts_info(h, skb)))
2035 desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B));
2038 static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
2039 struct sk_buff *skb)
2041 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
2042 unsigned int type = DESC_TYPE_BOUNCE_HEAD;
2043 unsigned int size = skb_headlen(skb);
2050 if (skb->len <= ring->tx_copybreak) {
2052 type = DESC_TYPE_BOUNCE_ALL;
2055 /* hns3_can_use_tx_bounce() is called to ensure the below
2056 * function can always return the tx buffer.
2058 buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len);
2060 ret = skb_copy_bits(skb, 0, buf, size);
2061 if (unlikely(ret < 0)) {
2062 hns3_tx_spare_rollback(ring, cb_len);
2063 u64_stats_update_begin(&ring->syncp);
2064 ring->stats.copy_bits_err++;
2065 u64_stats_update_end(&ring->syncp);
2069 desc_cb->priv = skb;
2070 desc_cb->length = cb_len;
2072 desc_cb->type = type;
2074 bd_num += hns3_fill_desc(ring, dma, size);
2076 if (type == DESC_TYPE_BOUNCE_HEAD) {
2077 ret = hns3_fill_skb_to_desc(ring, skb,
2078 DESC_TYPE_BOUNCE_HEAD);
2079 if (unlikely(ret < 0))
2085 dma_sync_single_for_device(ring_to_dev(ring), dma, size,
2088 u64_stats_update_begin(&ring->syncp);
2089 ring->stats.tx_bounce++;
2090 u64_stats_update_end(&ring->syncp);
2094 static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
2095 struct sk_buff *skb)
2097 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
2098 u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
2099 struct sg_table *sgt;
2105 if (skb_has_frag_list(skb))
2106 nfrag = HNS3_MAX_TSO_BD_NUM;
2108 /* hns3_can_use_tx_sgl() is called to ensure the below
2109 * function can always return the tx buffer.
2111 sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag),
2114 /* scatterlist follows by the sg table */
2115 sgt->sgl = (struct scatterlist *)(sgt + 1);
2116 sg_init_table(sgt->sgl, nfrag);
2117 nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
2118 if (unlikely(nents < 0)) {
2119 hns3_tx_spare_rollback(ring, cb_len);
2120 u64_stats_update_begin(&ring->syncp);
2121 ring->stats.skb2sgl_err++;
2122 u64_stats_update_end(&ring->syncp);
2126 sgt->orig_nents = nents;
2127 sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
2129 if (unlikely(!sgt->nents)) {
2130 hns3_tx_spare_rollback(ring, cb_len);
2131 u64_stats_update_begin(&ring->syncp);
2132 ring->stats.map_sg_err++;
2133 u64_stats_update_end(&ring->syncp);
2137 desc_cb->priv = skb;
2138 desc_cb->length = cb_len;
2140 desc_cb->type = DESC_TYPE_SGL_SKB;
2142 for (i = 0; i < sgt->nents; i++)
2143 bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
2144 sg_dma_len(sgt->sgl + i));
2146 u64_stats_update_begin(&ring->syncp);
2147 ring->stats.tx_sgl++;
2148 u64_stats_update_end(&ring->syncp);
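/* Choose the descriptor filling strategy for this skb based on the
 * available tx spare space: prefer the SGL path, then the bounce path,
 * and fall back to mapping the head and each fragment individually.
 */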
2153 static int hns3_handle_desc_filling(struct hns3_enet_ring *ring,
2154 struct sk_buff *skb)
2158 if (!ring->tx_spare)
2161 space = hns3_tx_spare_space(ring);
2163 if (hns3_can_use_tx_sgl(ring, skb, space))
2164 return hns3_handle_tx_sgl(ring, skb);
2166 if (hns3_can_use_tx_bounce(ring, skb, space))
2167 return hns3_handle_tx_bounce(ring, skb);
2170 return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
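/* Main transmit entry: pad short frames up to HNS3_MIN_TX_LEN, make sure
 * the ring has enough room, fill the descriptors, set the FE (frame end)
 * bit on the last BD and ring the doorbell, honoring netdev_xmit_more()
 * batching.
 */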
2173 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
2175 struct hns3_nic_priv *priv = netdev_priv(netdev);
2176 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
2177 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
2178 struct netdev_queue *dev_queue;
2179 int pre_ntu, next_to_use_head;
2183 /* Hardware cannot handle frames shorter than 33 bytes; pad short frames */
2184 if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
2185 hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
2187 u64_stats_update_begin(&ring->syncp);
2188 ring->stats.sw_err_cnt++;
2189 u64_stats_update_end(&ring->syncp);
2191 return NETDEV_TX_OK;
2194 /* Prefetch the data used later */
2195 prefetch(skb->data);
2197 ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
2198 if (unlikely(ret <= 0)) {
2199 if (ret == -EBUSY) {
2200 hns3_tx_doorbell(ring, 0, true);
2201 return NETDEV_TX_BUSY;
2204 hns3_rl_err(netdev, "xmit error: %d!\n", ret);
2208 next_to_use_head = ring->next_to_use;
2210 ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
2212 if (unlikely(ret < 0))
2215 /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
2216 * zero, which is unlikely, and 'ret > 0' means how many tx desc
2217 * need to be notified to the hw.
2219 ret = hns3_handle_desc_filling(ring, skb);
2220 if (unlikely(ret <= 0))
2223 pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
2224 (ring->desc_num - 1);
2226 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
2227 hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]);
2229 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
2230 cpu_to_le16(BIT(HNS3_TXD_FE_B));
2231 trace_hns3_tx_desc(ring, pre_ntu);
2233 skb_tx_timestamp(skb);
2235 /* All BDs for this skb are filled; account the sent bytes and ring the doorbell */
2236 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
2237 doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
2238 netdev_xmit_more());
2239 hns3_tx_doorbell(ring, ret, doorbell);
2241 return NETDEV_TX_OK;
2244 hns3_clear_desc(ring, next_to_use_head);
2247 dev_kfree_skb_any(skb);
2248 hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
2249 return NETDEV_TX_OK;
2252 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
2254 struct hnae3_handle *h = hns3_get_handle(netdev);
2255 struct sockaddr *mac_addr = p;
2258 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
2259 return -EADDRNOTAVAIL;
2261 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
2262 netdev_info(netdev, "already using mac address %pM\n",
2267 /* For VF device, if there is a perm_addr, then the user will not
2268 * be allowed to change the address.
2270 if (!hns3_is_phys_func(h->pdev) &&
2271 !is_zero_ether_addr(netdev->perm_addr)) {
2272 netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allowed\n",
2273 netdev->perm_addr, mac_addr->sa_data);
2277 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
2279 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
2283 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
2288 static int hns3_nic_do_ioctl(struct net_device *netdev,
2289 struct ifreq *ifr, int cmd)
2291 struct hnae3_handle *h = hns3_get_handle(netdev);
2293 if (!netif_running(netdev))
2296 if (!h->ae_algo->ops->do_ioctl)
2299 return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
2302 static int hns3_nic_set_features(struct net_device *netdev,
2303 netdev_features_t features)
2305 netdev_features_t changed = netdev->features ^ features;
2306 struct hns3_nic_priv *priv = netdev_priv(netdev);
2307 struct hnae3_handle *h = priv->ae_handle;
2311 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
2312 enable = !!(features & NETIF_F_GRO_HW);
2313 ret = h->ae_algo->ops->set_gro_en(h, enable);
2318 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
2319 h->ae_algo->ops->enable_hw_strip_rxvtag) {
2320 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
2321 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
2326 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
2327 enable = !!(features & NETIF_F_NTUPLE);
2328 h->ae_algo->ops->enable_fd(h, enable);
2331 if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
2332 h->ae_algo->ops->cls_flower_active(h)) {
2334 "there are offloaded TC filters active, cannot disable HW TC offload");
2338 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2339 h->ae_algo->ops->enable_vlan_filter) {
2340 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2341 ret = h->ae_algo->ops->enable_vlan_filter(h, enable);
2346 netdev->features = features;
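/* Drop checksum/GSO offload for packets whose (possibly tunneled) headers
 * exceed what the hardware can parse: the transport header offset plus a
 * worst-case 60-byte L4 header must stay within HNS3_MAX_HDR_LEN.
 */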
2350 static netdev_features_t hns3_features_check(struct sk_buff *skb,
2351 struct net_device *dev,
2352 netdev_features_t features)
2354 #define HNS3_MAX_HDR_LEN 480U
2355 #define HNS3_MAX_L4_HDR_LEN 60U
2359 if (skb->ip_summed != CHECKSUM_PARTIAL)
2362 if (skb->encapsulation)
2363 len = skb_inner_transport_header(skb) - skb->data;
2365 len = skb_transport_header(skb) - skb->data;
2367 /* Assume L4 is 60 bytes, as TCP is the only protocol with a
2368 * flexible header length, and its max length is 60 bytes.
2370 len += HNS3_MAX_L4_HDR_LEN;
2372 /* Hardware only supports checksum offload for packets whose total header length is at most HNS3_MAX_HDR_LEN (480) bytes */
2375 if (len > HNS3_MAX_HDR_LEN)
2376 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
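/* Aggregate the per-ring software counters under the u64_stats seqcount
 * and fold in the counters the ae layer keeps updated in netdev->stats.
 */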
2381 static void hns3_nic_get_stats64(struct net_device *netdev,
2382 struct rtnl_link_stats64 *stats)
2384 struct hns3_nic_priv *priv = netdev_priv(netdev);
2385 int queue_num = priv->ae_handle->kinfo.num_tqps;
2386 struct hnae3_handle *handle = priv->ae_handle;
2387 struct hns3_enet_ring *ring;
2388 u64 rx_length_errors = 0;
2389 u64 rx_crc_errors = 0;
2390 u64 rx_multicast = 0;
2402 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
2405 handle->ae_algo->ops->update_stats(handle, &netdev->stats);
2407 for (idx = 0; idx < queue_num; idx++) {
2408 /* fetch the tx stats */
2409 ring = &priv->ring[idx];
2411 start = u64_stats_fetch_begin_irq(&ring->syncp);
2412 tx_bytes += ring->stats.tx_bytes;
2413 tx_pkts += ring->stats.tx_pkts;
2414 tx_drop += ring->stats.sw_err_cnt;
2415 tx_drop += ring->stats.tx_vlan_err;
2416 tx_drop += ring->stats.tx_l4_proto_err;
2417 tx_drop += ring->stats.tx_l2l3l4_err;
2418 tx_drop += ring->stats.tx_tso_err;
2419 tx_drop += ring->stats.over_max_recursion;
2420 tx_drop += ring->stats.hw_limitation;
2421 tx_drop += ring->stats.copy_bits_err;
2422 tx_drop += ring->stats.skb2sgl_err;
2423 tx_drop += ring->stats.map_sg_err;
2424 tx_errors += ring->stats.sw_err_cnt;
2425 tx_errors += ring->stats.tx_vlan_err;
2426 tx_errors += ring->stats.tx_l4_proto_err;
2427 tx_errors += ring->stats.tx_l2l3l4_err;
2428 tx_errors += ring->stats.tx_tso_err;
2429 tx_errors += ring->stats.over_max_recursion;
2430 tx_errors += ring->stats.hw_limitation;
2431 tx_errors += ring->stats.copy_bits_err;
2432 tx_errors += ring->stats.skb2sgl_err;
2433 tx_errors += ring->stats.map_sg_err;
2434 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
2436 /* fetch the rx stats */
2437 ring = &priv->ring[idx + queue_num];
2439 start = u64_stats_fetch_begin_irq(&ring->syncp);
2440 rx_bytes += ring->stats.rx_bytes;
2441 rx_pkts += ring->stats.rx_pkts;
2442 rx_drop += ring->stats.l2_err;
2443 rx_errors += ring->stats.l2_err;
2444 rx_errors += ring->stats.l3l4_csum_err;
2445 rx_crc_errors += ring->stats.l2_err;
2446 rx_multicast += ring->stats.rx_multicast;
2447 rx_length_errors += ring->stats.err_pkt_len;
2448 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
2451 stats->tx_bytes = tx_bytes;
2452 stats->tx_packets = tx_pkts;
2453 stats->rx_bytes = rx_bytes;
2454 stats->rx_packets = rx_pkts;
2456 stats->rx_errors = rx_errors;
2457 stats->multicast = rx_multicast;
2458 stats->rx_length_errors = rx_length_errors;
2459 stats->rx_crc_errors = rx_crc_errors;
2460 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
2462 stats->tx_errors = tx_errors;
2463 stats->rx_dropped = rx_drop;
2464 stats->tx_dropped = tx_drop;
2465 stats->collisions = netdev->stats.collisions;
2466 stats->rx_over_errors = netdev->stats.rx_over_errors;
2467 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
2468 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
2469 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
2470 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
2471 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
2472 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
2473 stats->tx_window_errors = netdev->stats.tx_window_errors;
2474 stats->rx_compressed = netdev->stats.rx_compressed;
2475 stats->tx_compressed = netdev->stats.tx_compressed;
2478 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
2480 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2481 struct hnae3_knic_private_info *kinfo;
2482 u8 tc = mqprio_qopt->qopt.num_tc;
2483 u16 mode = mqprio_qopt->mode;
2484 u8 hw = mqprio_qopt->qopt.hw;
2485 struct hnae3_handle *h;
2487 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
2488 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
2491 if (tc > HNAE3_MAX_TC)
2497 h = hns3_get_handle(netdev);
2500 netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
2502 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
2503 kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP;
2506 static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv,
2507 struct flow_cls_offload *flow)
2509 int tc = tc_classid_to_hwtc(priv->netdev, flow->classid);
2510 struct hnae3_handle *h = hns3_get_handle(priv->netdev);
2512 switch (flow->command) {
2513 case FLOW_CLS_REPLACE:
2514 if (h->ae_algo->ops->add_cls_flower)
2515 return h->ae_algo->ops->add_cls_flower(h, flow, tc);
2517 case FLOW_CLS_DESTROY:
2518 if (h->ae_algo->ops->del_cls_flower)
2519 return h->ae_algo->ops->del_cls_flower(h, flow);
2528 static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2531 struct hns3_nic_priv *priv = cb_priv;
2533 if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
2537 case TC_SETUP_CLSFLOWER:
2538 return hns3_setup_tc_cls_flower(priv, type_data);
2544 static LIST_HEAD(hns3_block_cb_list);
2546 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
2549 struct hns3_nic_priv *priv = netdev_priv(dev);
2553 case TC_SETUP_QDISC_MQPRIO:
2554 ret = hns3_setup_tc(dev, type_data);
2556 case TC_SETUP_BLOCK:
2557 ret = flow_block_cb_setup_simple(type_data,
2558 &hns3_block_cb_list,
2559 hns3_setup_tc_block_cb,
2569 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
2570 __be16 proto, u16 vid)
2572 struct hnae3_handle *h = hns3_get_handle(netdev);
2575 if (h->ae_algo->ops->set_vlan_filter)
2576 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
2581 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
2582 __be16 proto, u16 vid)
2584 struct hnae3_handle *h = hns3_get_handle(netdev);
2587 if (h->ae_algo->ops->set_vlan_filter)
2588 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
2593 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
2594 u8 qos, __be16 vlan_proto)
2596 struct hnae3_handle *h = hns3_get_handle(netdev);
2599 netif_dbg(h, drv, netdev,
2600 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
2601 vf, vlan, qos, ntohs(vlan_proto));
2603 if (h->ae_algo->ops->set_vf_vlan_filter)
2604 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
2610 static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2612 struct hnae3_handle *handle = hns3_get_handle(netdev);
2614 if (hns3_nic_resetting(netdev))
2617 if (!handle->ae_algo->ops->set_vf_spoofchk)
2620 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
2623 static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
2625 struct hnae3_handle *handle = hns3_get_handle(netdev);
2627 if (!handle->ae_algo->ops->set_vf_trust)
2630 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
2633 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
2635 struct hnae3_handle *h = hns3_get_handle(netdev);
2638 if (hns3_nic_resetting(netdev))
2641 if (!h->ae_algo->ops->set_mtu)
2644 netif_dbg(h, drv, netdev,
2645 "change mtu from %u to %d\n", netdev->mtu, new_mtu);
2647 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
2649 netdev_err(netdev, "failed to change MTU in hardware %d\n",
2652 netdev->mtu = new_mtu;
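/* On a TX timeout, dump the software and hardware ring state (head/tail,
 * BD counters, interrupt mask) and the MAC pause counters to help debugging.
 * Returns false when no stopped queue is found, in which case no reset is
 * requested.
 */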
2657 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
2659 struct hns3_nic_priv *priv = netdev_priv(ndev);
2660 struct hnae3_handle *h = hns3_get_handle(ndev);
2661 struct hns3_enet_ring *tx_ring;
2662 struct napi_struct *napi;
2663 int timeout_queue = 0;
2664 int hw_head, hw_tail;
2665 int fbd_num, fbd_oft;
2666 int ebd_num, ebd_oft;
2671 /* Find the stopped queue the same way the stack does */
2672 for (i = 0; i < ndev->num_tx_queues; i++) {
2673 struct netdev_queue *q;
2674 unsigned long trans_start;
2676 q = netdev_get_tx_queue(ndev, i);
2677 trans_start = q->trans_start;
2678 if (netif_xmit_stopped(q) &&
2680 (trans_start + ndev->watchdog_timeo))) {
2682 netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
2684 jiffies_to_msecs(jiffies - trans_start));
2689 if (i == ndev->num_tx_queues) {
2691 "no netdev TX timeout queue found, timeout count: %llu\n",
2692 priv->tx_timeout_count);
2696 priv->tx_timeout_count++;
2698 tx_ring = &priv->ring[timeout_queue];
2699 napi = &tx_ring->tqp_vector->napi;
2702 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
2703 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
2704 tx_ring->next_to_clean, napi->state);
2707 "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
2708 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
2709 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);
2712 "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
2713 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
2714 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
2716 /* When the MAC receives many pause frames continuously, it is unable to
2717 * send packets, which may cause a tx timeout
2719 if (h->ae_algo->ops->get_mac_stats) {
2720 struct hns3_mac_stats mac_stats;
2722 h->ae_algo->ops->get_mac_stats(h, &mac_stats);
2723 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
2724 mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
2727 hw_head = readl_relaxed(tx_ring->tqp->io_base +
2728 HNS3_RING_TX_RING_HEAD_REG);
2729 hw_tail = readl_relaxed(tx_ring->tqp->io_base +
2730 HNS3_RING_TX_RING_TAIL_REG);
2731 fbd_num = readl_relaxed(tx_ring->tqp->io_base +
2732 HNS3_RING_TX_RING_FBDNUM_REG);
2733 fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
2734 HNS3_RING_TX_RING_OFFSET_REG);
2735 ebd_num = readl_relaxed(tx_ring->tqp->io_base +
2736 HNS3_RING_TX_RING_EBDNUM_REG);
2737 ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
2738 HNS3_RING_TX_RING_EBD_OFFSET_REG);
2739 bd_num = readl_relaxed(tx_ring->tqp->io_base +
2740 HNS3_RING_TX_RING_BD_NUM_REG);
2741 bd_err = readl_relaxed(tx_ring->tqp->io_base +
2742 HNS3_RING_TX_RING_BD_ERR_REG);
2743 ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
2744 tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
2747 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
2748 bd_num, hw_head, hw_tail, bd_err,
2749 readl(tx_ring->tqp_vector->mask_addr));
2751 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
2752 ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
2757 static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
2759 struct hns3_nic_priv *priv = netdev_priv(ndev);
2760 struct hnae3_handle *h = priv->ae_handle;
2762 if (!hns3_get_tx_timeo_queue_info(ndev))
2765 /* request the reset, and let the hclge determine
2766 * which reset level should be performed
2768 if (h->ae_algo->ops->reset_event)
2769 h->ae_algo->ops->reset_event(h->pdev, h);
2772 #ifdef CONFIG_RFS_ACCEL
2773 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
2774 u16 rxq_index, u32 flow_id)
2776 struct hnae3_handle *h = hns3_get_handle(dev);
2777 struct flow_keys fkeys;
2779 if (!h->ae_algo->ops->add_arfs_entry)
2782 if (skb->encapsulation)
2783 return -EPROTONOSUPPORT;
2785 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
2786 return -EPROTONOSUPPORT;
2788 if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
2789 fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
2790 (fkeys.basic.ip_proto != IPPROTO_TCP &&
2791 fkeys.basic.ip_proto != IPPROTO_UDP))
2792 return -EPROTONOSUPPORT;
2794 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
2798 static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
2799 struct ifla_vf_info *ivf)
2801 struct hnae3_handle *h = hns3_get_handle(ndev);
2803 if (!h->ae_algo->ops->get_vf_config)
2806 return h->ae_algo->ops->get_vf_config(h, vf, ivf);
2809 static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
2812 struct hnae3_handle *h = hns3_get_handle(ndev);
2814 if (!h->ae_algo->ops->set_vf_link_state)
2817 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
2820 static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
2821 int min_tx_rate, int max_tx_rate)
2823 struct hnae3_handle *h = hns3_get_handle(ndev);
2825 if (!h->ae_algo->ops->set_vf_rate)
2828 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
2832 static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2834 struct hnae3_handle *h = hns3_get_handle(netdev);
2836 if (!h->ae_algo->ops->set_vf_mac)
2839 if (is_multicast_ether_addr(mac)) {
2841 "Invalid MAC:%pM specified. Could not set MAC\n",
2846 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
2849 static const struct net_device_ops hns3_nic_netdev_ops = {
2850 .ndo_open = hns3_nic_net_open,
2851 .ndo_stop = hns3_nic_net_stop,
2852 .ndo_start_xmit = hns3_nic_net_xmit,
2853 .ndo_tx_timeout = hns3_nic_net_timeout,
2854 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
2855 .ndo_do_ioctl = hns3_nic_do_ioctl,
2856 .ndo_change_mtu = hns3_nic_change_mtu,
2857 .ndo_set_features = hns3_nic_set_features,
2858 .ndo_features_check = hns3_features_check,
2859 .ndo_get_stats64 = hns3_nic_get_stats64,
2860 .ndo_setup_tc = hns3_nic_setup_tc,
2861 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
2862 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
2863 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
2864 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
2865 .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk,
2866 .ndo_set_vf_trust = hns3_set_vf_trust,
2867 #ifdef CONFIG_RFS_ACCEL
2868 .ndo_rx_flow_steer = hns3_rx_flow_steer,
2870 .ndo_get_vf_config = hns3_nic_get_vf_config,
2871 .ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
2872 .ndo_set_vf_rate = hns3_nic_set_vf_rate,
2873 .ndo_set_vf_mac = hns3_nic_set_vf_mac,
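/* Tell PF and VF devices apart by PCI device ID; used to gate SR-IOV
 * configuration and PF-only suspend/resume handling.
 */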
2876 bool hns3_is_phys_func(struct pci_dev *pdev)
2878 u32 dev_id = pdev->device;
2881 case HNAE3_DEV_ID_GE:
2882 case HNAE3_DEV_ID_25GE:
2883 case HNAE3_DEV_ID_25GE_RDMA:
2884 case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
2885 case HNAE3_DEV_ID_50GE_RDMA:
2886 case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
2887 case HNAE3_DEV_ID_100G_RDMA_MACSEC:
2888 case HNAE3_DEV_ID_200G_RDMA:
2890 case HNAE3_DEV_ID_VF:
2891 case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
2894 dev_warn(&pdev->dev, "unrecognized pci device-id %u",
2901 static void hns3_disable_sriov(struct pci_dev *pdev)
2903 /* If our VFs are assigned we cannot shut down SR-IOV
2904 * without causing issues, so just leave the hardware
2905 * available but disabled
2907 if (pci_vfs_assigned(pdev)) {
2908 dev_warn(&pdev->dev,
2909 "disabling driver while VFs are assigned\n");
2913 pci_disable_sriov(pdev);
2916 /* hns3_probe - Device initialization routine
2917 * @pdev: PCI device information struct
2918 * @ent: entry in hns3_pci_tbl
2920 * hns3_probe initializes a PF identified by a pci_dev structure.
2921 * The OS initialization, configuring of the PF private structure,
2922 * and a hardware reset occur.
2924 * Returns 0 on success, negative on failure
2926 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2928 struct hnae3_ae_dev *ae_dev;
2931 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
2935 ae_dev->pdev = pdev;
2936 ae_dev->flag = ent->driver_data;
2937 pci_set_drvdata(pdev, ae_dev);
2939 ret = hnae3_register_ae_dev(ae_dev);
2941 pci_set_drvdata(pdev, NULL);
2946 /* hns3_remove - Device removal routine
2947 * @pdev: PCI device information struct
2949 static void hns3_remove(struct pci_dev *pdev)
2951 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2953 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
2954 hns3_disable_sriov(pdev);
2956 hnae3_unregister_ae_dev(ae_dev);
2957 pci_set_drvdata(pdev, NULL);
2961 * hns3_pci_sriov_configure
2962 * @pdev: pointer to a pci_dev structure
2963 * @num_vfs: number of VFs to allocate
2965 * Enable or change the number of VFs. Called when the user updates the number of VFs in sysfs.
2968 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
2972 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
2973 dev_warn(&pdev->dev, "Cannot configure SR-IOV\n");
2978 ret = pci_enable_sriov(pdev, num_vfs);
2980 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
2983 } else if (!pci_vfs_assigned(pdev)) {
2984 pci_disable_sriov(pdev);
2986 dev_warn(&pdev->dev,
2987 "Unable to free VFs because some are assigned to VMs.\n");
2993 static void hns3_shutdown(struct pci_dev *pdev)
2995 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2997 hnae3_unregister_ae_dev(ae_dev);
2998 pci_set_drvdata(pdev, NULL);
3000 if (system_state == SYSTEM_POWER_OFF)
3001 pci_set_power_state(pdev, PCI_D3hot);
3004 static int __maybe_unused hns3_suspend(struct device *dev)
3006 struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
3008 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
3009 dev_info(dev, "Begin to suspend.\n");
3010 if (ae_dev->ops && ae_dev->ops->reset_prepare)
3011 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET);
3017 static int __maybe_unused hns3_resume(struct device *dev)
3019 struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
3021 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
3022 dev_info(dev, "Begin to resume.\n");
3023 if (ae_dev->ops && ae_dev->ops->reset_done)
3024 ae_dev->ops->reset_done(ae_dev);
3030 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
3031 pci_channel_state_t state)
3033 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3034 pci_ers_result_t ret;
3036 dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state);
3038 if (state == pci_channel_io_perm_failure)
3039 return PCI_ERS_RESULT_DISCONNECT;
3041 if (!ae_dev || !ae_dev->ops) {
3043 "Can't recover - error happened before device initialized\n");
3044 return PCI_ERS_RESULT_NONE;
3047 if (ae_dev->ops->handle_hw_ras_error)
3048 ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
3050 return PCI_ERS_RESULT_NONE;
3055 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
3057 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3058 const struct hnae3_ae_ops *ops;
3059 enum hnae3_reset_type reset_type;
3060 struct device *dev = &pdev->dev;
3062 if (!ae_dev || !ae_dev->ops)
3063 return PCI_ERS_RESULT_NONE;
3066 /* request the reset */
3067 if (ops->reset_event && ops->get_reset_level &&
3068 ops->set_default_reset_request) {
3069 if (ae_dev->hw_err_reset_req) {
3070 reset_type = ops->get_reset_level(ae_dev,
3071 &ae_dev->hw_err_reset_req);
3072 ops->set_default_reset_request(ae_dev, reset_type);
3073 dev_info(dev, "requesting reset due to PCI error\n");
3074 ops->reset_event(pdev, NULL);
3077 return PCI_ERS_RESULT_RECOVERED;
3080 return PCI_ERS_RESULT_DISCONNECT;
3083 static void hns3_reset_prepare(struct pci_dev *pdev)
3085 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3087 dev_info(&pdev->dev, "FLR prepare\n");
3088 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare)
3089 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET);
3092 static void hns3_reset_done(struct pci_dev *pdev)
3094 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3096 dev_info(&pdev->dev, "FLR done\n");
3097 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done)
3098 ae_dev->ops->reset_done(ae_dev);
3101 static const struct pci_error_handlers hns3_err_handler = {
3102 .error_detected = hns3_error_detected,
3103 .slot_reset = hns3_slot_reset,
3104 .reset_prepare = hns3_reset_prepare,
3105 .reset_done = hns3_reset_done,
3108 static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume);
3110 static struct pci_driver hns3_driver = {
3111 .name = hns3_driver_name,
3112 .id_table = hns3_pci_tbl,
3113 .probe = hns3_probe,
3114 .remove = hns3_remove,
3115 .shutdown = hns3_shutdown,
3116 .driver.pm = &hns3_pm_ops,
3117 .sriov_configure = hns3_pci_sriov_configure,
3118 .err_handler = &hns3_err_handler,
3121 /* set the default features for the hns3 netdev */
3122 static void hns3_set_default_feature(struct net_device *netdev)
3124 struct hnae3_handle *h = hns3_get_handle(netdev);
3125 struct pci_dev *pdev = h->pdev;
3126 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3128 netdev->priv_flags |= IFF_UNICAST_FLT;
3130 netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
3131 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
3132 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
3133 NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
3135 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3137 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3138 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3139 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
3140 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
3141 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
3142 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
3144 netdev->vlan_features |= NETIF_F_RXCSUM |
3145 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
3146 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
3147 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
3148 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
3150 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
3151 NETIF_F_HW_VLAN_CTAG_RX |
3152 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
3153 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
3154 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
3155 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
3157 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3158 netdev->hw_features |= NETIF_F_GRO_HW;
3159 netdev->features |= NETIF_F_GRO_HW;
3161 if (!(h->flags & HNAE3_SUPPORT_VF)) {
3162 netdev->hw_features |= NETIF_F_NTUPLE;
3163 netdev->features |= NETIF_F_NTUPLE;
3167 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) {
3168 netdev->hw_features |= NETIF_F_GSO_UDP_L4;
3169 netdev->features |= NETIF_F_GSO_UDP_L4;
3170 netdev->vlan_features |= NETIF_F_GSO_UDP_L4;
3171 netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
3174 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) {
3175 netdev->hw_features |= NETIF_F_HW_CSUM;
3176 netdev->features |= NETIF_F_HW_CSUM;
3177 netdev->vlan_features |= NETIF_F_HW_CSUM;
3178 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
3180 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3181 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3182 netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3183 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3186 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) {
3187 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
3188 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
3189 netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
3190 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
3193 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
3194 netdev->hw_features |= NETIF_F_HW_TC;
3195 netdev->features |= NETIF_F_HW_TC;
3198 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
3199 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
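/* RX buffer pages take USHRT_MAX page references up front and hand them out
 * one at a time via pagecnt_bias, avoiding an atomic page_ref operation per
 * received frame in the fast path.
 */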
3202 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
3203 struct hns3_desc_cb *cb)
3205 unsigned int order = hns3_page_order(ring);
3208 p = dev_alloc_pages(order);
3213 cb->page_offset = 0;
3215 cb->buf = page_address(p);
3216 cb->length = hns3_page_size(ring);
3217 cb->type = DESC_TYPE_PAGE;
3218 page_ref_add(p, USHRT_MAX - 1);
3219 cb->pagecnt_bias = USHRT_MAX;
3224 static void hns3_free_buffer(struct hns3_enet_ring *ring,
3225 struct hns3_desc_cb *cb, int budget)
3227 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
3228 DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB))
3229 napi_consume_skb(cb->priv, budget);
3230 else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
3231 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
3232 memset(cb, 0, sizeof(*cb));
3235 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
3237 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
3238 cb->length, ring_to_dma_dir(ring));
3240 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
3246 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
3247 struct hns3_desc_cb *cb)
3249 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
3250 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
3251 ring_to_dma_dir(ring));
3252 else if ((cb->type & DESC_TYPE_PAGE) && cb->length)
3253 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
3254 ring_to_dma_dir(ring));
3255 else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD |
3257 hns3_tx_spare_reclaim_cb(ring, cb);
3260 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
3262 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
3263 ring->desc[i].addr = 0;
3266 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
3269 struct hns3_desc_cb *cb = &ring->desc_cb[i];
3271 if (!ring->desc_cb[i].dma)
3274 hns3_buffer_detach(ring, i);
3275 hns3_free_buffer(ring, cb, budget);
3278 static void hns3_free_buffers(struct hns3_enet_ring *ring)
3282 for (i = 0; i < ring->desc_num; i++)
3283 hns3_free_buffer_detach(ring, i, 0);
3286 /* free desc along with its attached buffer */
3287 static void hns3_free_desc(struct hns3_enet_ring *ring)
3289 int size = ring->desc_num * sizeof(ring->desc[0]);
3291 hns3_free_buffers(ring);
3294 dma_free_coherent(ring_to_dev(ring), size,
3295 ring->desc, ring->desc_dma_addr);
3300 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
3302 int size = ring->desc_num * sizeof(ring->desc[0]);
3304 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
3305 &ring->desc_dma_addr, GFP_KERNEL);
3312 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
3313 struct hns3_desc_cb *cb)
3317 ret = hns3_alloc_buffer(ring, cb);
3321 ret = hns3_map_buffer(ring, cb);
3328 hns3_free_buffer(ring, cb, 0);
3333 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
3335 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);
3340 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
3345 /* Allocate memory for the raw packet buffers and map them for DMA */
3346 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
3350 for (i = 0; i < ring->desc_num; i++) {
3351 ret = hns3_alloc_and_attach_buffer(ring, i);
3353 goto out_buffer_fail;
3359 for (j = i - 1; j >= 0; j--)
3360 hns3_free_buffer_detach(ring, j, 0);
3364 /* detach an in-use buffer and replace it with a reserved one */
3365 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
3366 struct hns3_desc_cb *res_cb)
3368 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
3369 ring->desc_cb[i] = *res_cb;
3370 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
3371 ring->desc[i].rx.bd_base_info = 0;
3374 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
3376 ring->desc_cb[i].reuse_flag = 0;
3377 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
3378 ring->desc_cb[i].page_offset);
3379 ring->desc[i].rx.bd_base_info = 0;
3381 dma_sync_single_for_device(ring_to_dev(ring),
3382 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
3383 hns3_buf_size(ring),
3387 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
3388 int *bytes, int *pkts, int budget)
3390 /* pairs with the ring->last_to_use update in hns3_tx_doorbell();
3391 * smp_store_release() is not used in hns3_tx_doorbell() because
3392 * the doorbell operation already has the needed barrier.
3394 int ltu = smp_load_acquire(&ring->last_to_use);
3395 int ntc = ring->next_to_clean;
3396 struct hns3_desc_cb *desc_cb;
3397 bool reclaimed = false;
3398 struct hns3_desc *desc;
3400 while (ltu != ntc) {
3401 desc = &ring->desc[ntc];
3403 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
3404 BIT(HNS3_TXD_VLD_B))
3407 desc_cb = &ring->desc_cb[ntc];
3409 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL |
3410 DESC_TYPE_BOUNCE_HEAD |
3411 DESC_TYPE_SGL_SKB)) {
3413 (*bytes) += desc_cb->send_bytes;
3416 /* desc_cb will be cleaned after hns3_free_buffer_detach() */
3417 hns3_free_buffer_detach(ring, ntc, budget);
3419 if (++ntc == ring->desc_num)
3422 /* Issue prefetch for next Tx descriptor */
3423 prefetch(&ring->desc_cb[ntc]);
3427 if (unlikely(!reclaimed))
3430 /* This smp_store_release() pairs with smp_load_acquire() in
3431 * ring_space called by hns3_nic_net_xmit.
3433 smp_store_release(&ring->next_to_clean, ntc);
3435 hns3_tx_spare_update(ring);
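/* TX completion: reclaim finished descriptors, report the completed bytes
 * and packets to BQL via netdev_tx_completed_queue(), and wake the queue
 * once more than HNS3_MAX_TSO_BD_NUM BDs of ring space are available again.
 */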
3440 void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
3442 struct net_device *netdev = ring_to_netdev(ring);
3443 struct hns3_nic_priv *priv = netdev_priv(netdev);
3444 struct netdev_queue *dev_queue;
3450 if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
3453 ring->tqp_vector->tx_group.total_bytes += bytes;
3454 ring->tqp_vector->tx_group.total_packets += pkts;
3456 u64_stats_update_begin(&ring->syncp);
3457 ring->stats.tx_bytes += bytes;
3458 ring->stats.tx_pkts += pkts;
3459 u64_stats_update_end(&ring->syncp);
3461 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
3462 netdev_tx_completed_queue(dev_queue, pkts, bytes);
3464 if (unlikely(netif_carrier_ok(netdev) &&
3465 ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
3466 /* Make sure that anybody stopping the queue after this
3467 * sees the new next_to_clean.
3470 if (netif_tx_queue_stopped(dev_queue) &&
3471 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
3472 netif_tx_wake_queue(dev_queue);
3473 ring->stats.restart_queue++;
3478 static int hns3_desc_unused(struct hns3_enet_ring *ring)
3480 int ntc = ring->next_to_clean;
3481 int ntu = ring->next_to_use;
3483 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
3486 static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
3489 struct hns3_desc_cb *desc_cb;
3490 struct hns3_desc_cb res_cbs;
3493 for (i = 0; i < cleand_count; i++) {
3494 desc_cb = &ring->desc_cb[ring->next_to_use];
3495 if (desc_cb->reuse_flag) {
3496 u64_stats_update_begin(&ring->syncp);
3497 ring->stats.reuse_pg_cnt++;
3498 u64_stats_update_end(&ring->syncp);
3500 hns3_reuse_buffer(ring, ring->next_to_use);
3502 ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
3504 u64_stats_update_begin(&ring->syncp);
3505 ring->stats.sw_err_cnt++;
3506 u64_stats_update_end(&ring->syncp);
3508 hns3_rl_err(ring_to_netdev(ring),
3509 "alloc rx buffer failed: %d\n",
3513 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
3515 u64_stats_update_begin(&ring->syncp);
3516 ring->stats.non_reuse_pg++;
3517 u64_stats_update_end(&ring->syncp);
3520 ring_ptr_move_fw(ring, next_to_use);
3523 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
3526 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
3528 return page_count(cb->priv) == cb->pagecnt_bias;
3531 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
3532 struct hns3_enet_ring *ring, int pull_len,
3533 struct hns3_desc_cb *desc_cb)
3535 struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
3536 u32 frag_offset = desc_cb->page_offset + pull_len;
3537 int size = le16_to_cpu(desc->rx.size);
3538 u32 truesize = hns3_buf_size(ring);
3539 u32 frag_size = size - pull_len;
3542 /* Avoid re-using remote or pfmem page */
3543 if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
3546 reused = hns3_can_reuse_page(desc_cb);
3548 /* Rx page can be reused when:
3549 * 1. Rx page is only owned by the driver when page_offset
3550 * is zero, which means 0 @ truesize will be used by
3551 * stack after skb_add_rx_frag() is called, and the rest
3552 * of rx page can be reused by driver.
3554 * 2. Rx page is only owned by the driver when page_offset
3555 * is non-zero, which means page_offset @ truesize will
3556 * be used by stack after skb_add_rx_frag() is called,
3557 * and 0 @ truesize can be reused by driver.
3559 if ((!desc_cb->page_offset && reused) ||
3560 ((desc_cb->page_offset + truesize + truesize) <=
3561 hns3_page_size(ring) && desc_cb->page_offset)) {
3562 desc_cb->page_offset += truesize;
3563 desc_cb->reuse_flag = 1;
3564 } else if (desc_cb->page_offset && reused) {
3565 desc_cb->page_offset = 0;
3566 desc_cb->reuse_flag = 1;
3567 } else if (frag_size <= ring->rx_copybreak) {
3568 void *frag = napi_alloc_frag(frag_size);
3570 if (unlikely(!frag)) {
3571 u64_stats_update_begin(&ring->syncp);
3572 ring->stats.frag_alloc_err++;
3573 u64_stats_update_end(&ring->syncp);
3575 hns3_rl_err(ring_to_netdev(ring),
3576 "failed to allocate rx frag\n");
3580 desc_cb->reuse_flag = 1;
3581 memcpy(frag, desc_cb->buf + frag_offset, frag_size);
3582 skb_add_rx_frag(skb, i, virt_to_page(frag),
3583 offset_in_page(frag), frag_size, frag_size);
3585 u64_stats_update_begin(&ring->syncp);
3586 ring->stats.frag_alloc++;
3587 u64_stats_update_end(&ring->syncp);
3592 desc_cb->pagecnt_bias--;
3594 if (unlikely(!desc_cb->pagecnt_bias)) {
3595 page_ref_add(desc_cb->priv, USHRT_MAX);
3596 desc_cb->pagecnt_bias = USHRT_MAX;
3599 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
3600 frag_size, truesize);
3602 if (unlikely(!desc_cb->reuse_flag))
3603 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
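/* For packets coalesced by hardware GRO, rebuild the TCP pseudo-header
 * checksum and fill in the gso fields so the stack can resegment the skb,
 * e.g. when forwarding.
 */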
3606 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
3608 __be16 type = skb->protocol;
3612 while (eth_type_vlan(type)) {
3613 struct vlan_hdr *vh;
3615 if ((depth + VLAN_HLEN) > skb_headlen(skb))
3618 vh = (struct vlan_hdr *)(skb->data + depth);
3619 type = vh->h_vlan_encapsulated_proto;
3623 skb_set_network_header(skb, depth);
3625 if (type == htons(ETH_P_IP)) {
3626 const struct iphdr *iph = ip_hdr(skb);
3628 depth += sizeof(struct iphdr);
3629 skb_set_transport_header(skb, depth);
3631 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
3633 } else if (type == htons(ETH_P_IPV6)) {
3634 const struct ipv6hdr *iph = ipv6_hdr(skb);
3636 depth += sizeof(struct ipv6hdr);
3637 skb_set_transport_header(skb, depth);
3639 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
3642 hns3_rl_err(skb->dev,
3643 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
3644 be16_to_cpu(type), depth);
3648 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
3650 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
3652 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
3653 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
3655 skb->csum_start = (unsigned char *)th - skb->head;
3656 skb->csum_offset = offsetof(struct tcphdr, check);
3657 skb->ip_summed = CHECKSUM_PARTIAL;
3659 trace_hns3_gro(skb);
3664 static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
3665 struct sk_buff *skb, u32 ptype, u16 csum)
3667 if (ptype == HNS3_INVALID_PTYPE ||
3668 hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
3671 u64_stats_update_begin(&ring->syncp);
3672 ring->stats.csum_complete++;
3673 u64_stats_update_end(&ring->syncp);
3674 skb->ip_summed = CHECKSUM_COMPLETE;
3675 skb->csum = csum_unfold((__force __sum16)csum);
3680 static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
3681 u32 ol_info, u32 ptype)
3683 int l3_type, l4_type;
3686 if (ptype != HNS3_INVALID_PTYPE) {
3687 skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level;
3688 skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed;
3693 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
3696 case HNS3_OL4_TYPE_MAC_IN_UDP:
3697 case HNS3_OL4_TYPE_NVGRE:
3698 skb->csum_level = 1;
3700 case HNS3_OL4_TYPE_NO_TUN:
3701 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
3703 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
3705 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
3706 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
3707 l3_type == HNS3_L3_TYPE_IPV6) &&
3708 (l4_type == HNS3_L4_TYPE_UDP ||
3709 l4_type == HNS3_L4_TYPE_TCP ||
3710 l4_type == HNS3_L4_TYPE_SCTP))
3711 skb->ip_summed = CHECKSUM_UNNECESSARY;
3718 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
3719 u32 l234info, u32 bd_base_info, u32 ol_info,
3722 struct net_device *netdev = ring_to_netdev(ring);
3723 struct hns3_nic_priv *priv = netdev_priv(netdev);
3724 u32 ptype = HNS3_INVALID_PTYPE;
3726 skb->ip_summed = CHECKSUM_NONE;
3728 skb_checksum_none_assert(skb);
3730 if (!(netdev->features & NETIF_F_RXCSUM))
3733 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state))
3734 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
3737 if (hns3_checksum_complete(ring, skb, ptype, csum))
3740 /* check if hardware has done checksum */
3741 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
3744 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
3745 BIT(HNS3_RXD_OL3E_B) |
3746 BIT(HNS3_RXD_OL4E_B)))) {
3747 u64_stats_update_begin(&ring->syncp);
3748 ring->stats.l3l4_csum_err++;
3749 u64_stats_update_end(&ring->syncp);
3754 hns3_rx_handle_csum(skb, l234info, ol_info, ptype);
3757 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
3759 if (skb_has_frag_list(skb))
3760 napi_gro_flush(&ring->tqp_vector->napi, false);
3762 napi_gro_receive(&ring->tqp_vector->napi, skb);
3765 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
3766 struct hns3_desc *desc, u32 l234info,
3769 struct hnae3_handle *handle = ring->tqp->handle;
3770 struct pci_dev *pdev = ring->tqp->handle->pdev;
3771 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3773 if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
3774 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3775 if (!(*vlan_tag & VLAN_VID_MASK))
3776 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3778 return (*vlan_tag != 0);
3781 #define HNS3_STRP_OUTER_VLAN 0x1
3782 #define HNS3_STRP_INNER_VLAN 0x2
3783 #define HNS3_STRP_BOTH 0x3
3785 /* Hardware always inserts the VLAN tag into the RX descriptor when it
3786 * strips the tag from the packet; the driver needs to determine
3787 * which tag to report to the stack.
3789 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
3790 HNS3_RXD_STRP_TAGP_S)) {
3791 case HNS3_STRP_OUTER_VLAN:
3792 if (handle->port_base_vlan_state !=
3793 HNAE3_PORT_BASE_VLAN_DISABLE)
3796 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3798 case HNS3_STRP_INNER_VLAN:
3799 if (handle->port_base_vlan_state !=
3800 HNAE3_PORT_BASE_VLAN_DISABLE)
3803 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3805 case HNS3_STRP_BOTH:
3806 if (handle->port_base_vlan_state ==
3807 HNAE3_PORT_BASE_VLAN_DISABLE)
3808 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3810 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3818 static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
3820 ring->desc[ring->next_to_clean].rx.bd_base_info &=
3821 cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
3822 ring->next_to_clean += 1;
3824 if (unlikely(ring->next_to_clean == ring->desc_num))
3825 ring->next_to_clean = 0;
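/* RX copybreak: frames up to HNS3_RX_HEAD_SIZE are copied entirely into the
 * skb so the page can be recycled immediately; larger frames copy only the
 * header (pull_len) and attach the payload as page fragments.
 */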
3828 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
3831 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
3832 struct net_device *netdev = ring_to_netdev(ring);
3833 struct sk_buff *skb;
3835 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
3837 if (unlikely(!skb)) {
3838 hns3_rl_err(netdev, "alloc rx skb fail\n");
3840 u64_stats_update_begin(&ring->syncp);
3841 ring->stats.sw_err_cnt++;
3842 u64_stats_update_end(&ring->syncp);
3847 trace_hns3_rx_desc(ring);
3848 prefetchw(skb->data);
3850 ring->pending_buf = 1;
3852 ring->tail_skb = NULL;
3853 if (length <= HNS3_RX_HEAD_SIZE) {
3854 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
3856 /* We can reuse buffer as-is, just make sure it is reusable */
3857 if (dev_page_is_reusable(desc_cb->priv))
3858 desc_cb->reuse_flag = 1;
3859 else /* This page cannot be reused so discard it */
3860 __page_frag_cache_drain(desc_cb->priv,
3861 desc_cb->pagecnt_bias);
3863 hns3_rx_ring_move_fw(ring);
3866 u64_stats_update_begin(&ring->syncp);
3867 ring->stats.seg_pkt_cnt++;
3868 u64_stats_update_end(&ring->syncp);
3870 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
3871 __skb_put(skb, ring->pull_len);
3872 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
3874 hns3_rx_ring_move_fw(ring);
3879 static int hns3_add_frag(struct hns3_enet_ring *ring)
3881 struct sk_buff *skb = ring->skb;
3882 struct sk_buff *head_skb = skb;
3883 struct sk_buff *new_skb;
3884 struct hns3_desc_cb *desc_cb;
3885 struct hns3_desc *desc;
3889 desc = &ring->desc[ring->next_to_clean];
3890 desc_cb = &ring->desc_cb[ring->next_to_clean];
3891 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
3892 /* make sure the HW has finished writing the descriptor */
3894 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
3897 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
3898 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
3899 if (unlikely(!new_skb)) {
3900 hns3_rl_err(ring_to_netdev(ring),
3901 "alloc rx fraglist skb fail\n");
3906 if (ring->tail_skb) {
3907 ring->tail_skb->next = new_skb;
3908 ring->tail_skb = new_skb;
3910 skb_shinfo(skb)->frag_list = new_skb;
3911 ring->tail_skb = new_skb;
3915 if (ring->tail_skb) {
3916 head_skb->truesize += hns3_buf_size(ring);
3917 head_skb->data_len += le16_to_cpu(desc->rx.size);
3918 head_skb->len += le16_to_cpu(desc->rx.size);
3919 skb = ring->tail_skb;
3922 dma_sync_single_for_cpu(ring_to_dev(ring),
3923 desc_cb->dma + desc_cb->page_offset,
3924 hns3_buf_size(ring),
3927 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
3928 trace_hns3_rx_desc(ring);
3929 hns3_rx_ring_move_fw(ring);
3930 ring->pending_buf++;
3931 } while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
3936 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
3937 struct sk_buff *skb, u32 l234info,
3938 u32 bd_base_info, u32 ol_info, u16 csum)
3940 struct net_device *netdev = ring_to_netdev(ring);
3941 struct hns3_nic_priv *priv = netdev_priv(netdev);
3944 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
3945 HNS3_RXD_GRO_SIZE_M,
3946 HNS3_RXD_GRO_SIZE_S);
3947 /* if there is no HW GRO, do not set gro params */
3948 if (!skb_shinfo(skb)->gso_size) {
3949 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info,
3954 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
3955 HNS3_RXD_GRO_COUNT_M,
3956 HNS3_RXD_GRO_COUNT_S);
3958 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
3959 u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
3962 l3_type = hns3_rx_ptype_tbl[ptype].l3_type;
3964 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
3968 if (l3_type == HNS3_L3_TYPE_IPV4)
3969 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
3970 else if (l3_type == HNS3_L3_TYPE_IPV6)
3971 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
3975 return hns3_gro_complete(skb, l234info);
3978 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
3979 struct sk_buff *skb, u32 rss_hash)
3981 struct hnae3_handle *handle = ring->tqp->handle;
3982 enum pkt_hash_types rss_type;
3985 rss_type = handle->kinfo.rss_type;
3987 rss_type = PKT_HASH_TYPE_NONE;
3989 skb_set_hash(skb, rss_hash, rss_type);
3992 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
3994 struct net_device *netdev = ring_to_netdev(ring);
3995 enum hns3_pkt_l2t_type l2_frame_type;
3996 u32 bd_base_info, l234info, ol_info;
3997 struct hns3_desc *desc;
4002 /* The bdinfo handled below is only valid on the last BD of the
4003 * current packet, and ring->next_to_clean indicates the first
4004 * descriptor of the next packet, hence the - 1 below.
4006 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
4007 (ring->desc_num - 1);
4008 desc = &ring->desc[pre_ntc];
4009 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
4010 l234info = le32_to_cpu(desc->rx.l234_info);
4011 ol_info = le32_to_cpu(desc->rx.ol_info);
4012 csum = le16_to_cpu(desc->csum);
4014 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
4015 struct hnae3_handle *h = hns3_get_handle(netdev);
4016 u32 nsec = le32_to_cpu(desc->ts_nsec);
4017 u32 sec = le32_to_cpu(desc->ts_sec);
4019 if (h->ae_algo->ops->get_rx_hwts)
4020 h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
4023 /* Based on the hw strategy, the offloaded tag is stored in
4024 * ot_vlan_tag in the two-layer tag case, and in vlan_tag
4025 * in the one-layer tag case.
4027 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
4030 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
4031 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
4035 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
4036 BIT(HNS3_RXD_L2E_B))))) {
4037 u64_stats_update_begin(&ring->syncp);
4038 if (l234info & BIT(HNS3_RXD_L2E_B))
4039 ring->stats.l2_err++;
4041 ring->stats.err_pkt_len++;
4042 u64_stats_update_end(&ring->syncp);
4049 /* Set the protocol and device before handing the skb up to the IP stack */
4050 skb->protocol = eth_type_trans(skb, netdev);
4052 /* This is needed in order to enable forwarding support */
4053 ret = hns3_set_gro_and_checksum(ring, skb, l234info,
4054 bd_base_info, ol_info, csum);
4055 if (unlikely(ret)) {
4056 u64_stats_update_begin(&ring->syncp);
4057 ring->stats.rx_err_cnt++;
4058 u64_stats_update_end(&ring->syncp);
4062 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
4065 u64_stats_update_begin(&ring->syncp);
4066 ring->stats.rx_pkts++;
4067 ring->stats.rx_bytes += len;
4069 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
4070 ring->stats.rx_multicast++;
4072 u64_stats_update_end(&ring->syncp);
4074 ring->tqp_vector->rx_group.total_bytes += len;
4076 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
4080 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
4082 struct sk_buff *skb = ring->skb;
4083 struct hns3_desc_cb *desc_cb;
4084 struct hns3_desc *desc;
4085 unsigned int length;
4089 desc = &ring->desc[ring->next_to_clean];
4090 desc_cb = &ring->desc_cb[ring->next_to_clean];
4095 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
4096 /* Check valid BD */
4097 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
4101 length = le16_to_cpu(desc->rx.size);
4103 ring->va = desc_cb->buf + desc_cb->page_offset;
4105 dma_sync_single_for_cpu(ring_to_dev(ring),
4106 desc_cb->dma + desc_cb->page_offset,
4107 hns3_buf_size(ring),
4110 /* Prefetch the first cache line of the first page.
4111 * The idea is to cache a few bytes of the packet header.
4112 * Our L1 cache line size is 64B, so we need to prefetch twice to
4113 * cover 128B. Caches with 128B L1 cache lines also exist; in that
4114 * case a single fetch would suffice to cache the relevant part of
4115 * the header.
4117 net_prefetch(ring->va);
4119 ret = hns3_alloc_skb(ring, length, ring->va);
4122 if (ret < 0) /* alloc buffer fail */
4124 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
4125 ret = hns3_add_frag(ring);
4130 ret = hns3_add_frag(ring);
4135 /* As the head data may be changed when GRO is enabled, copy
4136 * the head data in after the rest of the rx data has been received
4138 if (skb->len > HNS3_RX_HEAD_SIZE)
4139 memcpy(skb->data, ring->va,
4140 ALIGN(ring->pull_len, sizeof(long)));
4142 ret = hns3_handle_bdinfo(ring, skb);
4143 if (unlikely(ret)) {
4144 dev_kfree_skb_any(skb);
4148 skb_record_rx_queue(skb, ring->tqp->tqp_index);
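/* RX NAPI worker: process up to @budget packets, refilling or reusing RX
 * buffers in batches of RCB_NOF_ALLOC_RX_BUFF_ONCE while the loop runs and
 * topping up whatever is still unused before returning.
 */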
4152 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
4153 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
4155 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
4156 int unused_count = hns3_desc_unused(ring);
4160 unused_count -= ring->pending_buf;
4162 while (recv_pkts < budget) {
4163 /* Reuse or realloc buffers */
4164 if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
4165 hns3_nic_alloc_rx_buffers(ring, unused_count);
4166 unused_count = hns3_desc_unused(ring) -
4171 err = hns3_handle_rx_bd(ring);
4172 /* No FE (frame end) BD for the packet yet, or skb allocation failed */
4173 if (unlikely(!ring->skb || err == -ENXIO)) {
4175 } else if (likely(!err)) {
4176 rx_fn(ring, ring->skb);
4180 unused_count += ring->pending_buf;
4182 ring->pending_buf = 0;
4186 /* Make sure all data has been written before submitting buffers to the HW */
4187 if (unused_count > 0)
4188 hns3_nic_alloc_rx_buffers(ring, unused_count);
4193 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
4195 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
4196 struct dim_sample sample = {};
4198 if (!rx_group->coal.adapt_enable)
4201 dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
4202 rx_group->total_bytes, &sample);
4203 net_dim(&rx_group->dim, sample);
4206 static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
4208 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
4209 struct dim_sample sample = {};
4211 if (!tx_group->coal.adapt_enable)
4214 dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
4215 tx_group->total_bytes, &sample);
4216 net_dim(&tx_group->dim, sample);
4219 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
4221 struct hns3_nic_priv *priv = netdev_priv(napi->dev);
4222 struct hns3_enet_ring *ring;
4223 int rx_pkt_total = 0;
4225 struct hns3_enet_tqp_vector *tqp_vector =
4226 container_of(napi, struct hns3_enet_tqp_vector, napi);
4227 bool clean_complete = true;
4228 int rx_budget = budget;
4230 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
4231 napi_complete(napi);
4235 /* Since the actual Tx work is minimal, we can give the Tx a larger
4236 * budget and be more aggressive about cleaning up the Tx descriptors.
4238 hns3_for_each_ring(ring, tqp_vector->tx_group)
4239 hns3_clean_tx_ring(ring, budget);
4241 /* make sure the rx ring budget is not smaller than 1 */
4242 if (tqp_vector->num_tqps > 1)
4243 rx_budget = max(budget / tqp_vector->num_tqps, 1);
4245 hns3_for_each_ring(ring, tqp_vector->rx_group) {
4246 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
4248 if (rx_cleaned >= rx_budget)
4249 clean_complete = false;
4251 rx_pkt_total += rx_cleaned;
4254 tqp_vector->rx_group.total_packets += rx_pkt_total;
4256 if (!clean_complete)
4259 if (napi_complete(napi) &&
4260 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
4261 hns3_update_rx_int_coalesce(tqp_vector);
4262 hns3_update_tx_int_coalesce(tqp_vector);
4264 hns3_mask_vector_irq(tqp_vector, 1);
4267 return rx_pkt_total;
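/* Build the TX-then-RX ring chain for one TQP vector so the ae layer can
 * map every ring on this vector to the proper interrupt cause and GL index.
 */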
4270 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
4271 struct hnae3_ring_chain_node *head)
4273 struct pci_dev *pdev = tqp_vector->handle->pdev;
4274 struct hnae3_ring_chain_node *cur_chain = head;
4275 struct hnae3_ring_chain_node *chain;
4276 struct hns3_enet_ring *tx_ring;
4277 struct hns3_enet_ring *rx_ring;
4279 tx_ring = tqp_vector->tx_group.ring;
4281 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
4282 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
4283 HNAE3_RING_TYPE_TX);
4284 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
4285 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
4287 cur_chain->next = NULL;
4289 while (tx_ring->next) {
4290 tx_ring = tx_ring->next;
4292 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
4295 goto err_free_chain;
4297 cur_chain->next = chain;
4298 chain->tqp_index = tx_ring->tqp->tqp_index;
4299 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
4300 HNAE3_RING_TYPE_TX);
4301 hnae3_set_field(chain->int_gl_idx,
4302 HNAE3_RING_GL_IDX_M,
4303 HNAE3_RING_GL_IDX_S,
4310 rx_ring = tqp_vector->rx_group.ring;
4311 if (!tx_ring && rx_ring) {
4312 cur_chain->next = NULL;
4313 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
4314 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
4315 HNAE3_RING_TYPE_RX);
4316 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
4317 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
4319 rx_ring = rx_ring->next;
4323 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
4324 if (!chain)
4325 goto err_free_chain;
4327 cur_chain->next = chain;
4328 chain->tqp_index = rx_ring->tqp->tqp_index;
4329 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
4330 HNAE3_RING_TYPE_RX);
4331 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
4332 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
4336 rx_ring = rx_ring->next;
4342 cur_chain = head->next;
4344 chain = cur_chain->next;
4345 devm_kfree(&pdev->dev, cur_chain);
4353 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
4354 struct hnae3_ring_chain_node *head)
4356 struct pci_dev *pdev = tqp_vector->handle->pdev;
4357 struct hnae3_ring_chain_node *chain_tmp, *chain;
4362 chain_tmp = chain->next;
4363 devm_kfree(&pdev->dev, chain);
4368 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
4369 struct hns3_enet_ring *ring)
4371 ring->next = group->ring;
4372 group->ring = ring;
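/* Spread the vectors' affinity hints across CPUs local to the device's NUMA
 * node using cpumask_local_spread().
 */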
4377 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
4379 struct pci_dev *pdev = priv->ae_handle->pdev;
4380 struct hns3_enet_tqp_vector *tqp_vector;
4381 int num_vectors = priv->vector_num;
4385 numa_node = dev_to_node(&pdev->dev);
4387 for (vector_i = 0; vector_i < num_vectors; vector_i++) {
4388 tqp_vector = &priv->tqp_vector[vector_i];
4389 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
4390 &tqp_vector->affinity_mask);
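/* DIM work handlers: apply the moderation profile suggested by net_dim to the
 * vector's GL (Interrupt Gap Limiter) setting, and to the QL (Interrupt
 * Quantity Limiter) setting when the suggested packet count is below
 * int_ql_max, then restart the measurement cycle.
 */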
4394 static void hns3_rx_dim_work(struct work_struct *work)
4396 struct dim *dim = container_of(work, struct dim, work);
4397 struct hns3_enet_ring_group *group = container_of(dim,
4398 struct hns3_enet_ring_group, dim);
4399 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
4400 struct dim_cq_moder cur_moder =
4401 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
4403 hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec);
4404 tqp_vector->rx_group.coal.int_gl = cur_moder.usec;
4406 if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) {
4407 hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts);
4408 tqp_vector->rx_group.coal.int_ql = cur_moder.pkts;
4411 dim->state = DIM_START_MEASURE;
4414 static void hns3_tx_dim_work(struct work_struct *work)
4416 struct dim *dim = container_of(work, struct dim, work);
4417 struct hns3_enet_ring_group *group = container_of(dim,
4418 struct hns3_enet_ring_group, dim);
4419 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
4420 struct dim_cq_moder cur_moder =
4421 net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
4423 hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec);
4424 tqp_vector->tx_group.coal.int_gl = cur_moder.usec;
4426 if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) {
4427 hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts);
4428 tqp_vector->tx_group.coal.int_ql = cur_moder.pkts;
4431 dim->state = DIM_START_MEASURE;
4434 static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector)
4436 INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
4437 tqp_vector->rx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4438 INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
4439 tqp_vector->tx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
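/* Distribute the TQPs across the allocated vectors round-robin, then build
 * the ring chain for each vector, ask the AE layer to map the rings to the
 * vector's interrupt, and register the vector's NAPI context.
 */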
4442 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
4444 struct hnae3_handle *h = priv->ae_handle;
4445 struct hns3_enet_tqp_vector *tqp_vector;
4449 hns3_nic_set_cpumask(priv);
4451 for (i = 0; i < priv->vector_num; i++) {
4452 tqp_vector = &priv->tqp_vector[i];
4453 hns3_vector_coalesce_init_hw(tqp_vector, priv);
4454 tqp_vector->num_tqps = 0;
4455 hns3_nic_init_dim(tqp_vector);
4458 for (i = 0; i < h->kinfo.num_tqps; i++) {
4459 u16 vector_i = i % priv->vector_num;
4460 u16 tqp_num = h->kinfo.num_tqps;
4462 tqp_vector = &priv->tqp_vector[vector_i];
4464 hns3_add_ring_to_group(&tqp_vector->tx_group,
4467 hns3_add_ring_to_group(&tqp_vector->rx_group,
4468 &priv->ring[i + tqp_num]);
4470 priv->ring[i].tqp_vector = tqp_vector;
4471 priv->ring[i + tqp_num].tqp_vector = tqp_vector;
4472 tqp_vector->num_tqps++;
4475 for (i = 0; i < priv->vector_num; i++) {
4476 struct hnae3_ring_chain_node vector_ring_chain;
4478 tqp_vector = &priv->tqp_vector[i];
4480 tqp_vector->rx_group.total_bytes = 0;
4481 tqp_vector->rx_group.total_packets = 0;
4482 tqp_vector->tx_group.total_bytes = 0;
4483 tqp_vector->tx_group.total_packets = 0;
4484 tqp_vector->handle = h;
4486 ret = hns3_get_vector_ring_chain(tqp_vector,
4487 &vector_ring_chain);
4491 ret = h->ae_algo->ops->map_ring_to_vector(h,
4492 tqp_vector->vector_irq, &vector_ring_chain);
4494 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
4499 netif_napi_add(priv->netdev, &tqp_vector->napi,
4500 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
4507 netif_napi_del(&priv->tqp_vector[i].napi);
4512 static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
4514 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
4515 struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
4516 struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
4518 /* initialize the configuration for interrupt coalescing.
4519 * 1. GL (Interrupt Gap Limiter)
4520 * 2. RL (Interrupt Rate Limiter)
4521 * 3. QL (Interrupt Quantity Limiter)
4523 * Default: enable self-adaptive interrupt coalescing and GL
4525 tx_coal->adapt_enable = 1;
4526 rx_coal->adapt_enable = 1;
4528 tx_coal->int_gl = HNS3_INT_GL_50K;
4529 rx_coal->int_gl = HNS3_INT_GL_50K;
4531 rx_coal->flow_level = HNS3_FLOW_LOW;
4532 tx_coal->flow_level = HNS3_FLOW_LOW;
4534 if (ae_dev->dev_specs.int_ql_max) {
4535 tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
4536 rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
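/* Allocate the TQP vector array: the requested vector count is the smaller of
 * the online CPU count and the TQP count, and the AE layer reports back how
 * many vectors are actually available.
 */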
4540 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
4542 struct hnae3_handle *h = priv->ae_handle;
4543 struct hns3_enet_tqp_vector *tqp_vector;
4544 struct hnae3_vector_info *vector;
4545 struct pci_dev *pdev = h->pdev;
4546 u16 tqp_num = h->kinfo.num_tqps;
4551 /* The RSS size, the number of online CPUs and vector_num should be the same */
4552 /* Should consider 2p/4p systems later */
4553 vector_num = min_t(u16, num_online_cpus(), tqp_num);
4555 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
4560 /* save the actual available vector number */
4561 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
4563 priv->vector_num = vector_num;
4564 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
4565 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
4567 if (!priv->tqp_vector) {
4572 for (i = 0; i < priv->vector_num; i++) {
4573 tqp_vector = &priv->tqp_vector[i];
4574 tqp_vector->idx = i;
4575 tqp_vector->mask_addr = vector[i].io_addr;
4576 tqp_vector->vector_irq = vector[i].vector;
4577 hns3_vector_coalesce_init(tqp_vector, priv);
4581 devm_kfree(&pdev->dev, vector);
4585 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
4591 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
4593 struct hnae3_ring_chain_node vector_ring_chain;
4594 struct hnae3_handle *h = priv->ae_handle;
4595 struct hns3_enet_tqp_vector *tqp_vector;
4598 for (i = 0; i < priv->vector_num; i++) {
4599 tqp_vector = &priv->tqp_vector[i];
4601 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
4604 /* Since the mapping can be overwritten, if we fail to get the
4605 * chain between the vector and its rings, we should still go on
4606 * to deal with the remaining unmap and cleanup steps.
4608 if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
4609 dev_warn(priv->dev, "failed to get ring chain\n");
4611 h->ae_algo->ops->unmap_ring_from_vector(h,
4612 tqp_vector->vector_irq, &vector_ring_chain);
4614 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
4616 hns3_clear_ring_group(&tqp_vector->rx_group);
4617 hns3_clear_ring_group(&tqp_vector->tx_group);
4618 netif_napi_del(&priv->tqp_vector[i].napi);
4622 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
4624 struct hnae3_handle *h = priv->ae_handle;
4625 struct pci_dev *pdev = h->pdev;
4628 for (i = 0; i < priv->vector_num; i++) {
4629 struct hns3_enet_tqp_vector *tqp_vector;
4631 tqp_vector = &priv->tqp_vector[i];
4632 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
4637 devm_kfree(&pdev->dev, priv->tqp_vector);
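/* Initialize one ring's software state. TX rings live in
 * priv->ring[0 .. num_tqps - 1] and RX rings in
 * priv->ring[num_tqps .. 2 * num_tqps - 1], both indexed by tqp_index.
 */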
4640 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
4641 unsigned int ring_type)
4643 int queue_num = priv->ae_handle->kinfo.num_tqps;
4644 struct hns3_enet_ring *ring;
4647 if (ring_type == HNAE3_RING_TYPE_TX) {
4648 ring = &priv->ring[q->tqp_index];
4649 desc_num = priv->ae_handle->kinfo.num_tx_desc;
4650 ring->queue_index = q->tqp_index;
4651 ring->tx_copybreak = priv->tx_copybreak;
4652 ring->last_to_use = 0;
4654 ring = &priv->ring[q->tqp_index + queue_num];
4655 desc_num = priv->ae_handle->kinfo.num_rx_desc;
4656 ring->queue_index = q->tqp_index;
4657 ring->rx_copybreak = priv->rx_copybreak;
4660 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
4662 ring->tqp = q;
4663 ring->desc = NULL;
4664 ring->desc_cb = NULL;
4665 ring->dev = priv->dev;
4666 ring->desc_dma_addr = 0;
4667 ring->buf_size = q->buf_size;
4668 ring->desc_num = desc_num;
4669 ring->next_to_use = 0;
4670 ring->next_to_clean = 0;
4673 static void hns3_queue_to_ring(struct hnae3_queue *tqp,
4674 struct hns3_nic_priv *priv)
4676 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
4677 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
4680 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
4682 struct hnae3_handle *h = priv->ae_handle;
4683 struct pci_dev *pdev = h->pdev;
4686 priv->ring = devm_kzalloc(&pdev->dev,
4687 array3_size(h->kinfo.num_tqps,
4688 sizeof(*priv->ring), 2),
4693 for (i = 0; i < h->kinfo.num_tqps; i++)
4694 hns3_queue_to_ring(h->kinfo.tqp[i], priv);
4699 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
4704 devm_kfree(priv->dev, priv->ring);
4708 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
4712 if (ring->desc_num <= 0 || ring->buf_size <= 0)
4715 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
4716 sizeof(ring->desc_cb[0]), GFP_KERNEL);
4717 if (!ring->desc_cb) {
4722 ret = hns3_alloc_desc(ring);
4724 goto out_with_desc_cb;
4726 if (!HNAE3_IS_TX_RING(ring)) {
4727 ret = hns3_alloc_ring_buffers(ring);
4731 hns3_init_tx_spare_buffer(ring);
4737 hns3_free_desc(ring);
4739 devm_kfree(ring_to_dev(ring), ring->desc_cb);
4740 ring->desc_cb = NULL;
4745 void hns3_fini_ring(struct hns3_enet_ring *ring)
4747 hns3_free_desc(ring);
4748 devm_kfree(ring_to_dev(ring), ring->desc_cb);
4749 ring->desc_cb = NULL;
4750 ring->next_to_clean = 0;
4751 ring->next_to_use = 0;
4752 ring->last_to_use = 0;
4753 ring->pending_buf = 0;
4754 if (!HNAE3_IS_TX_RING(ring) && ring->skb) {
4755 dev_kfree_skb_any(ring->skb);
4757 } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) {
4758 struct hns3_tx_spare *tx_spare = ring->tx_spare;
4760 dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len,
4762 free_pages((unsigned long)tx_spare->buf,
4763 get_order(tx_spare->len));
4764 devm_kfree(ring_to_dev(ring), tx_spare);
4765 ring->tx_spare = NULL;
4769 static int hns3_buf_size2type(u32 buf_size)
4775 bd_size_type = HNS3_BD_SIZE_512_TYPE;
4778 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
4781 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
4784 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
4787 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
4790 return bd_size_type;
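/* Program the ring's descriptor base address and BD number into the queue
 * registers; RX rings additionally get the buffer-size type written.
 */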
4793 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
4795 dma_addr_t dma = ring->desc_dma_addr;
4796 struct hnae3_queue *q = ring->tqp;
4798 if (!HNAE3_IS_TX_RING(ring)) {
4799 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
4800 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
4801 (u32)((dma >> 31) >> 1));
4803 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
4804 hns3_buf_size2type(ring->buf_size));
4805 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
4806 ring->desc_num / 8 - 1);
4808 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
4810 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
4811 (u32)((dma >> 31) >> 1));
4813 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
4814 ring->desc_num / 8 - 1);
4818 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
4820 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
4821 struct hnae3_tc_info *tc_info = &kinfo->tc_info;
4824 for (i = 0; i < HNAE3_MAX_TC; i++) {
4827 if (!test_bit(i, &tc_info->tc_en))
4830 for (j = 0; j < tc_info->tqp_count[i]; j++) {
4831 struct hnae3_queue *q;
4833 q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
4834 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
4839 int hns3_init_all_ring(struct hns3_nic_priv *priv)
4841 struct hnae3_handle *h = priv->ae_handle;
4842 int ring_num = h->kinfo.num_tqps * 2;
4846 for (i = 0; i < ring_num; i++) {
4847 ret = hns3_alloc_ring_memory(&priv->ring[i]);
4850 "Alloc ring memory fail! ret=%d\n", ret);
4851 goto out_when_alloc_ring_memory;
4854 u64_stats_init(&priv->ring[i].syncp);
4859 out_when_alloc_ring_memory:
4860 for (j = i - 1; j >= 0; j--)
4861 hns3_fini_ring(&priv->ring[j]);
4866 static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
4868 struct hnae3_handle *h = priv->ae_handle;
4871 for (i = 0; i < h->kinfo.num_tqps; i++) {
4872 hns3_fini_ring(&priv->ring[i]);
4873 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
4877 /* Set the MAC address if it is configured, otherwise leave it to the AE driver */
4878 static int hns3_init_mac_addr(struct net_device *netdev)
4880 struct hns3_nic_priv *priv = netdev_priv(netdev);
4881 struct hnae3_handle *h = priv->ae_handle;
4882 u8 mac_addr_temp[ETH_ALEN];
4885 if (h->ae_algo->ops->get_mac_addr)
4886 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
4888 /* Check if the MAC address is valid; if not, use a random one */
4889 if (!is_valid_ether_addr(mac_addr_temp)) {
4890 eth_hw_addr_random(netdev);
4891 dev_warn(priv->dev, "using random MAC address %pM\n",
4893 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
4894 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
4895 ether_addr_copy(netdev->perm_addr, mac_addr_temp);
4900 if (h->ae_algo->ops->set_mac_addr)
4901 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
4906 static int hns3_init_phy(struct net_device *netdev)
4908 struct hnae3_handle *h = hns3_get_handle(netdev);
4911 if (h->ae_algo->ops->mac_connect_phy)
4912 ret = h->ae_algo->ops->mac_connect_phy(h);
4917 static void hns3_uninit_phy(struct net_device *netdev)
4919 struct hnae3_handle *h = hns3_get_handle(netdev);
4921 if (h->ae_algo->ops->mac_disconnect_phy)
4922 h->ae_algo->ops->mac_disconnect_phy(h);
4925 static int hns3_client_start(struct hnae3_handle *handle)
4927 if (!handle->ae_algo->ops->client_start)
4930 return handle->ae_algo->ops->client_start(handle);
4933 static void hns3_client_stop(struct hnae3_handle *handle)
4935 if (!handle->ae_algo->ops->client_stop)
4938 handle->ae_algo->ops->client_stop(handle);
4941 static void hns3_info_show(struct hns3_nic_priv *priv)
4943 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
4945 dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
4946 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
4947 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
4948 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
4949 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
4950 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
4951 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
4952 dev_info(priv->dev, "Total number of enabled TCs: %u\n",
4953 kinfo->tc_info.num_tc);
4954 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
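/* Client probe path: allocate the netdev, set up the MAC address, features
 * and netdev ops, then initialize rings, vectors and IRQs, start the client,
 * set up DCB and debugfs, and finally register the netdev.
 */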
4957 static int hns3_client_init(struct hnae3_handle *handle)
4959 struct pci_dev *pdev = handle->pdev;
4960 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4961 u16 alloc_tqps, max_rss_size;
4962 struct hns3_nic_priv *priv;
4963 struct net_device *netdev;
4966 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
4968 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
4972 priv = netdev_priv(netdev);
4973 priv->dev = &pdev->dev;
4974 priv->netdev = netdev;
4975 priv->ae_handle = handle;
4976 priv->tx_timeout_count = 0;
4977 priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
4978 set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
4980 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
4982 handle->kinfo.netdev = netdev;
4983 handle->priv = (void *)priv;
4985 hns3_init_mac_addr(netdev);
4987 hns3_set_default_feature(netdev);
4989 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
4990 netdev->priv_flags |= IFF_UNICAST_FLT;
4991 netdev->netdev_ops = &hns3_nic_netdev_ops;
4992 SET_NETDEV_DEV(netdev, &pdev->dev);
4993 hns3_ethtool_set_ops(netdev);
4995 /* Carrier off reporting is important to ethtool even BEFORE open */
4996 netif_carrier_off(netdev);
4998 ret = hns3_get_ring_config(priv);
5001 goto out_get_ring_cfg;
5004 hns3_nic_init_coal_cfg(priv);
5006 ret = hns3_nic_alloc_vector_data(priv);
5009 goto out_alloc_vector_data;
5012 ret = hns3_nic_init_vector_data(priv);
5015 goto out_init_vector_data;
5018 ret = hns3_init_all_ring(priv);
5024 ret = hns3_init_phy(netdev);
5028 /* the device can work without cpu rmap, only aRFS needs it */
5029 ret = hns3_set_rx_cpu_rmap(netdev);
5031 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
5033 ret = hns3_nic_init_irq(priv);
5035 dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
5036 hns3_free_rx_cpu_rmap(netdev);
5037 goto out_init_irq_fail;
5040 ret = hns3_client_start(handle);
5042 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
5043 goto out_client_start;
5046 hns3_dcbnl_setup(handle);
5048 ret = hns3_dbg_init(handle);
5050 dev_err(priv->dev, "failed to init debugfs, ret = %d\n",
5052 goto out_client_start;
5055 netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
5057 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
5058 set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
5060 if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
5061 set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
5063 set_bit(HNS3_NIC_STATE_INITED, &priv->state);
5065 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5066 set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
5068 ret = register_netdev(netdev);
5070 dev_err(priv->dev, "probe register netdev fail!\n");
5071 goto out_reg_netdev_fail;
5074 if (netif_msg_drv(handle))
5075 hns3_info_show(priv);
5079 out_reg_netdev_fail:
5080 hns3_dbg_uninit(handle);
5082 hns3_free_rx_cpu_rmap(netdev);
5083 hns3_nic_uninit_irq(priv);
5085 hns3_uninit_phy(netdev);
5087 hns3_uninit_all_ring(priv);
5089 hns3_nic_uninit_vector_data(priv);
5090 out_init_vector_data:
5091 hns3_nic_dealloc_vector_data(priv);
5092 out_alloc_vector_data:
5095 priv->ae_handle = NULL;
5096 free_netdev(netdev);
5100 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
5102 struct net_device *netdev = handle->kinfo.netdev;
5103 struct hns3_nic_priv *priv = netdev_priv(netdev);
5105 if (netdev->reg_state != NETREG_UNINITIALIZED)
5106 unregister_netdev(netdev);
5108 hns3_client_stop(handle);
5110 hns3_uninit_phy(netdev);
5112 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5113 netdev_warn(netdev, "already uninitialized\n");
5114 goto out_netdev_free;
5117 hns3_free_rx_cpu_rmap(netdev);
5119 hns3_nic_uninit_irq(priv);
5121 hns3_clear_all_ring(handle, true);
5123 hns3_nic_uninit_vector_data(priv);
5125 hns3_nic_dealloc_vector_data(priv);
5127 hns3_uninit_all_ring(priv);
5129 hns3_put_ring_config(priv);
5132 hns3_dbg_uninit(handle);
5133 free_netdev(netdev);
5136 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
5138 struct net_device *netdev = handle->kinfo.netdev;
5144 netif_tx_wake_all_queues(netdev);
5145 netif_carrier_on(netdev);
5146 if (netif_msg_link(handle))
5147 netdev_info(netdev, "link up\n");
5149 netif_carrier_off(netdev);
5150 netif_tx_stop_all_queues(netdev);
5151 if (netif_msg_link(handle))
5152 netdev_info(netdev, "link down\n");
5156 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
5158 while (ring->next_to_clean != ring->next_to_use) {
5159 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
5160 hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
5161 ring_ptr_move_fw(ring, next_to_clean);
5164 ring->pending_buf = 0;
5167 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
5169 struct hns3_desc_cb res_cbs;
5172 while (ring->next_to_use != ring->next_to_clean) {
5173 /* When a buffer is not reused, its memory has been
5174 * freed in hns3_handle_rx_bd or will be freed by the
5175 * stack, so we need to replace the buffer here.
5177 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
5178 ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
5180 u64_stats_update_begin(&ring->syncp);
5181 ring->stats.sw_err_cnt++;
5182 u64_stats_update_end(&ring->syncp);
5183 /* if allocating a new buffer fails, exit directly
5184 * and clear the ring again in the up flow.
5186 netdev_warn(ring_to_netdev(ring),
5187 "reserve buffer map failed, ret = %d\n",
5191 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
5193 ring_ptr_move_fw(ring, next_to_use);
5196 /* Free the pending skb in rx ring */
5198 dev_kfree_skb_any(ring->skb);
5200 ring->pending_buf = 0;
5206 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
5208 while (ring->next_to_use != ring->next_to_clean) {
5209 /* When a buffer is not reused, its memory has been
5210 * freed in hns3_handle_rx_bd or will be freed by the
5211 * stack, so we only need to unmap the buffer here.
5213 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
5214 hns3_unmap_buffer(ring,
5215 &ring->desc_cb[ring->next_to_use]);
5216 ring->desc_cb[ring->next_to_use].dma = 0;
5219 ring_ptr_move_fw(ring, next_to_use);
5223 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
5225 struct net_device *ndev = h->kinfo.netdev;
5226 struct hns3_nic_priv *priv = netdev_priv(ndev);
5229 for (i = 0; i < h->kinfo.num_tqps; i++) {
5230 struct hns3_enet_ring *ring;
5232 ring = &priv->ring[i];
5233 hns3_clear_tx_ring(ring);
5235 ring = &priv->ring[i + h->kinfo.num_tqps];
5236 /* Continue to clear other rings even if clearing some
5237 * of them fails.
5238 */
5239 if (force)
5240 hns3_force_clear_rx_ring(ring);
5241 else
5242 hns3_clear_rx_ring(ring);
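/* Re-initialize every ring after a queue reset: TX rings are cleared and
 * their pointers rewound, while RX rings get all descriptors reposted since
 * the hardware head and tail are unknown at this point.
 */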
5246 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
5248 struct net_device *ndev = h->kinfo.netdev;
5249 struct hns3_nic_priv *priv = netdev_priv(ndev);
5250 struct hns3_enet_ring *rx_ring;
5254 ret = h->ae_algo->ops->reset_queue(h);
5258 for (i = 0; i < h->kinfo.num_tqps; i++) {
5259 hns3_init_ring_hw(&priv->ring[i]);
5261 /* We need to clear the tx ring here because the self test will
5262 * use the ring and will not bring the interface down before up
5264 hns3_clear_tx_ring(&priv->ring[i]);
5265 priv->ring[i].next_to_clean = 0;
5266 priv->ring[i].next_to_use = 0;
5267 priv->ring[i].last_to_use = 0;
5269 rx_ring = &priv->ring[i + h->kinfo.num_tqps];
5270 hns3_init_ring_hw(rx_ring);
5271 ret = hns3_clear_rx_ring(rx_ring);
5275 /* We cannot know the hardware head and tail when this
5276 * function is called in the reset flow, so we reuse all descriptors.
5278 for (j = 0; j < rx_ring->desc_num; j++)
5279 hns3_reuse_buffer(rx_ring, j);
5281 rx_ring->next_to_clean = 0;
5282 rx_ring->next_to_use = 0;
5285 hns3_init_tx_ring_tc(priv);
5290 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
5292 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5293 struct net_device *ndev = kinfo->netdev;
5294 struct hns3_nic_priv *priv = netdev_priv(ndev);
5296 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
5299 if (!netif_running(ndev))
5302 return hns3_nic_net_stop(ndev);
5305 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
5307 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5308 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
5311 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5312 netdev_err(kinfo->netdev, "device is not initialized yet\n");
5316 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
5318 if (netif_running(kinfo->netdev)) {
5319 ret = hns3_nic_net_open(kinfo->netdev);
5321 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
5322 netdev_err(kinfo->netdev,
5323 "net up fail, ret=%d!\n", ret);
5331 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
5333 struct net_device *netdev = handle->kinfo.netdev;
5334 struct hns3_nic_priv *priv = netdev_priv(netdev);
5337 /* Carrier off reporting is important to ethtool even BEFORE open */
5338 netif_carrier_off(netdev);
5340 ret = hns3_get_ring_config(priv);
5344 ret = hns3_nic_alloc_vector_data(priv);
5348 ret = hns3_nic_init_vector_data(priv);
5350 goto err_dealloc_vector;
5352 ret = hns3_init_all_ring(priv);
5354 goto err_uninit_vector;
5356 /* the device can work without cpu rmap, only aRFS needs it */
5357 ret = hns3_set_rx_cpu_rmap(netdev);
5359 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
5361 ret = hns3_nic_init_irq(priv);
5363 dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
5364 hns3_free_rx_cpu_rmap(netdev);
5365 goto err_init_irq_fail;
5368 if (!hns3_is_phys_func(handle->pdev))
5369 hns3_init_mac_addr(netdev);
5371 ret = hns3_client_start(handle);
5373 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
5374 goto err_client_start_fail;
5377 set_bit(HNS3_NIC_STATE_INITED, &priv->state);
5381 err_client_start_fail:
5382 hns3_free_rx_cpu_rmap(netdev);
5383 hns3_nic_uninit_irq(priv);
5385 hns3_uninit_all_ring(priv);
5387 hns3_nic_uninit_vector_data(priv);
5389 hns3_nic_dealloc_vector_data(priv);
5391 hns3_put_ring_config(priv);
5396 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
5398 struct net_device *netdev = handle->kinfo.netdev;
5399 struct hns3_nic_priv *priv = netdev_priv(netdev);
5401 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5402 netdev_warn(netdev, "already uninitialized\n");
5406 hns3_free_rx_cpu_rmap(netdev);
5407 hns3_nic_uninit_irq(priv);
5408 hns3_clear_all_ring(handle, true);
5409 hns3_reset_tx_queue(priv->ae_handle);
5411 hns3_nic_uninit_vector_data(priv);
5413 hns3_nic_dealloc_vector_data(priv);
5415 hns3_uninit_all_ring(priv);
5417 hns3_put_ring_config(priv);
5422 static int hns3_reset_notify(struct hnae3_handle *handle,
5423 enum hnae3_reset_notify_type type)
5428 case HNAE3_UP_CLIENT:
5429 ret = hns3_reset_notify_up_enet(handle);
5431 case HNAE3_DOWN_CLIENT:
5432 ret = hns3_reset_notify_down_enet(handle);
5434 case HNAE3_INIT_CLIENT:
5435 ret = hns3_reset_notify_init_enet(handle);
5437 case HNAE3_UNINIT_CLIENT:
5438 ret = hns3_reset_notify_uninit_enet(handle);
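/* Apply a new TQP count by asking the AE layer to change the channels and
 * then re-initializing and bringing the client back up through the reset
 * notification callbacks; on failure the old mapping is torn down again.
 */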
5447 static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
5448 bool rxfh_configured)
5452 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
5455 dev_err(&handle->pdev->dev,
5456 "Change tqp num(%u) fail.\n", new_tqp_num);
5460 ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
5464 ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT);
5466 hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);
5471 int hns3_set_channels(struct net_device *netdev,
5472 struct ethtool_channels *ch)
5474 struct hnae3_handle *h = hns3_get_handle(netdev);
5475 struct hnae3_knic_private_info *kinfo = &h->kinfo;
5476 bool rxfh_configured = netif_is_rxfh_configured(netdev);
5477 u32 new_tqp_num = ch->combined_count;
5481 if (hns3_nic_resetting(netdev))
5484 if (ch->rx_count || ch->tx_count)
5487 if (kinfo->tc_info.mqprio_active) {
5488 dev_err(&netdev->dev,
5489 "it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
5493 if (new_tqp_num > hns3_get_max_available_channels(h) ||
5495 dev_err(&netdev->dev,
5496 "Change tqps fail, the tqp range is from 1 to %u",
5497 hns3_get_max_available_channels(h));
5501 if (kinfo->rss_size == new_tqp_num)
5504 netif_dbg(h, drv, netdev,
5505 "set channels: tqp_num=%u, rxfh=%d\n",
5506 new_tqp_num, rxfh_configured);
5508 ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
5512 ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
5516 org_tqp_num = h->kinfo.num_tqps;
5517 ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
5522 "Change channels fail, revert to old value\n");
5523 ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
5526 "revert to old channel fail\n");
5536 static const struct hns3_hw_error_info hns3_hw_err[] = {
5537 { .type = HNAE3_PPU_POISON_ERROR,
5538 .msg = "PPU poison" },
5539 { .type = HNAE3_CMDQ_ECC_ERROR,
5540 .msg = "IMP CMDQ error" },
5541 { .type = HNAE3_IMP_RD_POISON_ERROR,
5542 .msg = "IMP RD poison" },
5543 { .type = HNAE3_ROCEE_AXI_RESP_ERROR,
5544 .msg = "ROCEE AXI RESP error" },
5547 static void hns3_process_hw_error(struct hnae3_handle *handle,
5548 enum hnae3_hw_error_type type)
5552 for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
5553 if (hns3_hw_err[i].type == type) {
5554 dev_err(&handle->pdev->dev, "Detected %s!\n",
5555 hns3_hw_err[i].msg);
5561 static const struct hnae3_client_ops client_ops = {
5562 .init_instance = hns3_client_init,
5563 .uninit_instance = hns3_client_uninit,
5564 .link_status_change = hns3_link_status_change,
5565 .reset_notify = hns3_reset_notify,
5566 .process_hw_error = hns3_process_hw_error,
5569 /* hns3_init_module - Driver registration routine
5570 * hns3_init_module is the first routine called when the driver is
5571 * loaded. It registers the debugfs entry, the hnae3 client and the PCI driver.
5573 static int __init hns3_init_module(void)
5577 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
5578 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
5580 client.type = HNAE3_CLIENT_KNIC;
5581 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
5584 client.ops = &client_ops;
5586 INIT_LIST_HEAD(&client.node);
5588 hns3_dbg_register_debugfs(hns3_driver_name);
5590 ret = hnae3_register_client(&client);
5592 goto err_reg_client;
5594 ret = pci_register_driver(&hns3_driver);
5596 goto err_reg_driver;
5601 hnae3_unregister_client(&client);
5603 hns3_dbg_unregister_debugfs();
5606 module_init(hns3_init_module);
5608 /* hns3_exit_module - Driver exit cleanup routine
5609 * hns3_exit_module is called just before the driver is removed from memory.
5612 static void __exit hns3_exit_module(void)
5614 pci_unregister_driver(&hns3_driver);
5615 hnae3_unregister_client(&client);
5616 hns3_dbg_unregister_debugfs();
5618 module_exit(hns3_exit_module);
5620 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
5621 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
5622 MODULE_LICENSE("GPL");
5623 MODULE_ALIAS("pci:hns-nic");