net: atlantic: loopback tests via private flags
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "../aq_phy.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"

#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
        .is_64_dma = true,                \
        .msix_irqs = 8U,                  \
        .irq_mask = ~0U,                  \
        .vecs = HW_ATL_B0_RSS_MAX,        \
        .tcs = HW_ATL_B0_TC_MAX,          \
        .rxd_alignment = 1U,              \
        .rxd_size = HW_ATL_B0_RXD_SIZE,   \
        .rxds_max = HW_ATL_B0_MAX_RXD,    \
        .rxds_min = HW_ATL_B0_MIN_RXD,    \
        .txd_alignment = 1U,              \
        .txd_size = HW_ATL_B0_TXD_SIZE,   \
        .txds_max = HW_ATL_B0_MAX_TXD,    \
        .txds_min = HW_ATL_B0_MIN_TXD,    \
        .txhwb_alignment = 4096U,         \
        .tx_rings = HW_ATL_B0_TX_RINGS,   \
        .rx_rings = HW_ATL_B0_RX_RINGS,   \
        .hw_features = NETIF_F_HW_CSUM |  \
                        NETIF_F_RXCSUM |  \
                        NETIF_F_RXHASH |  \
                        NETIF_F_SG |      \
                        NETIF_F_TSO |     \
                        NETIF_F_LRO |     \
                        NETIF_F_NTUPLE |  \
                        NETIF_F_HW_VLAN_CTAG_FILTER | \
                        NETIF_F_HW_VLAN_CTAG_RX |     \
                        NETIF_F_HW_VLAN_CTAG_TX,      \
        .hw_priv_flags = IFF_UNICAST_FLT, \
        .flow_control = true,             \
        .mtu = HW_ATL_B0_MTU_JUMBO,       \
        .mac_regs_count = 88,             \
        .hw_alive_check_addr = 0x10U

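/* FRAC_PER_NS is 2^32: the PTP frequency-adjustment math further down works
 * in 32.32 fixed point, i.e. FRAC_PER_NS fractional units per nanosecond.
 */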
#define FRAC_PER_NS 0x100000000LL

const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
        int err = 0;

        err = hw_atl_utils_soft_reset(self);
        if (err)
                return err;

        self->aq_fw_ops->set_state(self, MPI_RESET);

        err = aq_hw_err_from_flags(self);

        return err;
}

static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
        return 0;
}

static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
        u32 tc = 0U;
        u32 buff_size = 0U;
        unsigned int i_priority = 0U;

        /* TPS Descriptor rate init */
        hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
        hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

        /* TPS VM init */
        hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

        /* TPS TC credits init */
        hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
        hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

        tc = 0;

        /* TX Packet Scheduler Data TC0 */
        hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
        hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
        hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
        hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);

        /* Tx buf size TC0 */
        buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE;

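        /* The watermarks below are programmed in 32-byte units: buff_size
         * is in kilobytes, so buff_size * (1024 / 32) converts it, and the
         * high/low thresholds are then 66% / 50% of the buffer.  E.g. a
         * 100 KB buffer gives hi = 100 * 32 * 66 / 100 = 2112 units and
         * lo = 1600 units.
         */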
        hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024 / 32U) * 66U) /
                                                   100U, tc);
        hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024 / 32U) * 50U) /
                                                   100U, tc);
        /* Init TC2 for PTP_TX */
        tc = 2;

        hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
                                               tc);

        /* QoS Rx buf size per TC */
        tc = 0;
        buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE;

        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024U / 32U) * 66U) /
                                                   100U, tc);
        hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024U / 32U) * 50U) /
                                                   100U, tc);

        hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);

        /* Init TC2 for PTP_RX */
        tc = 2;

        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
                                               tc);
        /* No flow control for PTP */
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);

        /* QoS 802.1p priority -> TC mapping */
        for (i_priority = 8U; i_priority--;)
                hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

        return aq_hw_err_from_flags(self);
}

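/* The 40-byte RSS hash key is written 32 bits at a time in reverse order
 * (byte-swapped for the hardware); each word is latched by raising the
 * write-enable bit and polling until the hardware clears it again
 * (readx_poll_timeout_atomic: 1 ms poll interval, 10 ms timeout).
 */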
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
                                     struct aq_rss_parameters *rss_params)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int err = 0;
        unsigned int i = 0U;
        unsigned int addr = 0U;
        u32 val;

        for (i = 10, addr = 0U; i--; ++addr) {
                u32 key_data = cfg->is_rss ?
                        __swab32(rss_params->hash_secret_key[i]) : 0U;
                hw_atl_rpf_rss_key_wr_data_set(self, key_data);
                hw_atl_rpf_rss_key_addr_set(self, addr);
                hw_atl_rpf_rss_key_wr_en_set(self, 1U);
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
                                struct aq_rss_parameters *rss_params)
{
        u8 *indirection_table = rss_params->indirection_table;
        u32 i = 0U;
        u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
        int err = 0;
        u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
                   HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
        u32 val;

        memset(bitary, 0, sizeof(bitary));

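        /* Each redirection entry is a 3-bit queue index: entry i lands at
         * bit (i * 3) % 16 of 16-bit word (i * 3) / 16, and the u32 access
         * lets an entry straddle into the following word.
         */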
        for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
                (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
                        ((indirection_table[i] % num_rss_queues) <<
                        ((i * 3U) & 0xFU));
        }

        for (i = ARRAY_SIZE(bitary); i--;) {
                hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
                hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
                hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
                                    struct aq_nic_cfg_s *aq_nic_cfg)
{
        unsigned int i;

        /* Tx checksum offloads */
        hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

        /* Rx checksum offloads */
        hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
                                                 NETIF_F_RXCSUM));
        hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
                                              NETIF_F_RXCSUM));

        /* LSO offloads */
        hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

        /* Outer VLAN tag offload */
        hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);

        /* LRO offloads */
        {
                unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
                        ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
                        ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

                for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
                        hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);

                hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
                hw_atl_rpo_lro_inactive_interval_set(self, 0);
                /* The LRO timebase divider is 5 us (0x61a), which is
                 * multiplied by 50 (0x32) to get a maximum coalescing
                 * interval of 250 us, which is the default value.
                 */
                hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);

                hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

                hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

                hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);

                hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

                hw_atl_rpo_lro_pkt_lim_set(self, 1U);

                hw_atl_rpo_lro_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
                hw_atl_itr_rsc_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);

                hw_atl_itr_rsc_delay_set(self, 1U);
        }
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
        /* Tx TC/Queue number config */
        hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);

        hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

        /* Tx interrupts */
        hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
                        0x00010000U : 0x00000000U);
        hw_atl_tdm_tx_dca_en_set(self, 0U);
        hw_atl_tdm_tx_dca_mode_set(self, 0U);

        hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int i;

        /* Rx TC/RSS number config */
        hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

        /* Rx flow control */
        hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

        /* RSS Ring selection */
        hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
                                        0xB3333333U : 0x00000000U);

        /* Multicast filters */
        for (i = HW_ATL_B0_MAC_MAX; i--;) {
                hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
                hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
        }

        hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
        hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

        /* Vlan filters */
        hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
        hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

        hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

        /* Always accept untagged packets */
        hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
        hw_atl_rpf_vlan_untagged_act_set(self, 1U);

        /* Rx Interrupts */
        hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00005040U,
                        IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);

        hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
        hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

        hw_atl_rdm_rx_dca_en_set(self, 0U);
        hw_atl_rdm_rx_dca_mode_set(self, 0U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
        int err = 0;
        unsigned int h = 0U;
        unsigned int l = 0U;

        if (!mac_addr) {
                err = -EINVAL;
                goto err_exit;
        }
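        /* The address is split across two filter registers: e.g. for
         * 00:11:22:33:44:55 this packs h = 0x0011 (bytes 0-1) and
         * l = 0x22334455 (bytes 2-5).
         */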
        h = (mac_addr[0] << 8) | (mac_addr[1]);
        l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
                (mac_addr[4] << 8) | mac_addr[5];

        hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
        hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
        hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
        hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
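        /* Global interrupt control values, indexed first by interrupt type
         * and then by whether more than one vector is in use; consumed by
         * the hw_atl_reg_irq_glb_ctl_set() call further down.
         */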
        static u32 aq_hw_atl_igcr_table_[4][2] = {
                [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
                [AQ_HW_IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
                [AQ_HW_IRQ_MSI]     = { 0x20000021U, 0x20000025U },
                [AQ_HW_IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
        };

        int err = 0;
        u32 val;

        struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;

        hw_atl_b0_hw_init_tx_path(self);
        hw_atl_b0_hw_init_rx_path(self);

        hw_atl_b0_hw_mac_addr_set(self, mac_addr);

        self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
        self->aq_fw_ops->set_state(self, MPI_INIT);

        hw_atl_b0_hw_qos_set(self);
        hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

        /* Force limit MRRS on RDM/TDM to 2K */
        val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
        aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
                        (val & ~0x707) | 0x404);

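        /* The read-modify-write above clears two 3-bit request-size fields
         * (mask 0x707) and sets both to 4, which in the usual PCIe
         * max-read-request encoding (128 << n) is 2048 bytes, i.e. the 2K
         * mentioned above.
         */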
        /* TX DMA total request limit. B0 hardware is not capable of
         * handling more than (8K - MRRS) of incoming DMA data.
         * Value 24 is in 256-byte units.
         */
        aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
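        /* Sanity check on the arithmetic: 24 * 256 = 6144 = 8192 - 2048,
         * i.e. 8K minus the 2K MRRS forced above.
         */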

        /* Reset link status and read out initial hardware counters */
        self->aq_link_status.mbps = 0;
        self->aq_fw_ops->update_stats(self);

        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;

        /* Interrupts */
        hw_atl_reg_irq_glb_ctl_set(self,
                                   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
                                                 [(aq_nic_cfg->vecs > 1U) ?
                                                 1 : 0]);

        hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

        /* Interrupts */
        hw_atl_reg_gen_irq_map_set(self,
                                   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
                            ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);

        /* Enable link interrupt */
        if (aq_nic_cfg->link_irq_vec)
                hw_atl_reg_gen_irq_map_set(self, BIT(7) |
                                           aq_nic_cfg->link_irq_vec, 3U);

        hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
        return err;
}

static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
        hw_atl_tpb_tx_buff_en_set(self, 1);
        hw_atl_rpb_rx_buff_en_set(self, 1);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
        return 0;
}

static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int frags)
{
        struct aq_ring_buff_s *buff = NULL;
        struct hw_atl_txd_s *txd = NULL;
        unsigned int buff_pa_len = 0U;
        unsigned int pkt_len = 0U;
        unsigned int frag_count = 0U;
        bool is_vlan = false;
        bool is_gso = false;

        buff = &ring->buff_ring[ring->sw_tail];
        pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

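        /* GSO and VLAN-tagged packets need a context descriptor (TXC)
         * carrying the offload parameters ahead of the data descriptors
         * (TXD); the loop below builds it from the first buffer and then
         * fills plain data descriptors for the payload fragments.
         */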
        for (frag_count = 0; frag_count < frags; frag_count++) {
                txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
                                                HW_ATL_B0_TXD_SIZE];
                txd->ctl = 0;
                txd->ctl2 = 0;
                txd->buf_addr = 0;

                buff = &ring->buff_ring[ring->sw_tail];

                if (buff->is_gso) {
                        txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl |= (buff->len_l3 << 31) |
                                    (buff->len_l2 << 24);
                        txd->ctl2 |= (buff->mss << 16);
                        is_gso = true;

                        pkt_len -= (buff->len_l4 +
                                    buff->len_l3 +
                                    buff->len_l2);
                        if (buff->is_ipv6)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
                        txd->ctl2 |= (buff->len_l4 << 8) |
                                     (buff->len_l3 >> 1);
                }
                if (buff->is_vlan) {
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl |= buff->vlan_tx_tag << 4;
                        is_vlan = true;
                }
                if (!buff->is_gso && !buff->is_vlan) {
                        buff_pa_len = buff->len;

                        txd->buf_addr = buff->pa;
                        txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
                                                ((u32)buff_pa_len << 4));
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;

                        /* PAY_LEN */
                        txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

                        if (is_gso || is_vlan) {
                                /* enable tx context */
                                txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
                        }
                        if (is_gso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;

                        /* Tx checksum offloads */
                        if (buff->is_ip_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

                        if (buff->is_udp_cso || buff->is_tcp_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

                        if (is_vlan)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_VLAN;

                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
                                is_gso = false;
                                is_vlan = false;
                        }
                }
                ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
        }

        hw_atl_b0_hw_tx_ring_tail_update(self, ring);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
        u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;

        hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
                                                  aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addressmswset(self,
                                                  dma_desc_addr_msw, aq_ring->idx);

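        /* The length registers are programmed scaled (hence the / 8U and
         * / 1024U below): the hardware apparently counts the ring length
         * in units of 8 descriptors and the data buffer size in whole
         * kilobytes.
         */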
        hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_rdm_rx_desc_data_buff_size_set(self,
                                              AQ_CFG_RX_FRAME_MAX / 1024U,
                                       aq_ring->idx);

        hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
        hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping,
                                              aq_ring->idx);

        /* Rx ring set mode */

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

        hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
                                                  aq_ring->idx);

        hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
                                                  aq_ring->idx);

        hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

        /* Set Tx threshold */
        hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

        hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int sw_tail_old)
{
        for (; sw_tail_old != ring->sw_tail;
                sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
                struct hw_atl_rxd_s *rxd =
                        (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
                                                        HW_ATL_B0_RXD_SIZE];

                struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

                rxd->buf_addr = buff->pa;
                rxd->hdr_addr = 0U;
        }

        hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
                                          struct aq_ring_s *ring)
{
        unsigned int i;

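        /* buf_addr is pointed just past the descriptor ring itself
         * (dx_ring_pa + size * dx_size), presumably because HWTS
         * completions are written back into the descriptor and the data
         * buffer only needs to be a safe scratch address.
         */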
        for (i = aq_ring_avail_dx(ring); i--;
                        ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
                struct hw_atl_rxd_s *rxd =
                        (struct hw_atl_rxd_s *)
                        &ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];

                rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
                rxd->hdr_addr = 0U;
        }
        /* Make sure descriptors are updated before bumping the tail */
        wmb();

        hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
                                             struct aq_ring_s *ring)
{
        while (ring->hw_head != ring->sw_tail) {
                struct hw_atl_rxd_hwts_wb_s *hwts_wb =
                        (struct hw_atl_rxd_hwts_wb_s *)
                        (ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));

                /* RxD is not done */
                if (!(hwts_wb->sec_lw0 & 0x1U))
                        break;

                ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        int err = 0;
        unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);

        if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
                err = -ENXIO;
                goto err_exit;
        }
        ring->hw_head = hw_head_;
        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                                        struct aq_ring_s *ring)
{
        for (; ring->hw_head != ring->sw_tail;
                ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
                struct aq_ring_buff_s *buff = NULL;
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;
                u8 rx_stat = 0U;

                if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
                        break;
                }

                buff = &ring->buff_ring[ring->hw_head];

                buff->flags = 0U;
                buff->is_hash_l4 = 0U;

                rx_stat = (0x0000003CU & rxd_wb->status) >> 2;

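                /* rx_stat holds status bits 2..5 of the writeback; in the
                 * decode below bit 0 is a MAC error, bit 1 an IPv4
                 * checksum error, bit 2 a TCP/UDP checksum error and
                 * bit 3 apparently the L4-checksum-valid flag.
                 */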
                is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;

                pkt_type = (rxd_wb->type & HW_ATL_B0_RXD_WB_STAT_PKTTYPE) >>
                           HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT;

                if (is_rx_check_sum_enabled & BIT(0) &&
                    (0x0U == (pkt_type & 0x3U)))
                        buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;

                if (is_rx_check_sum_enabled & BIT(1)) {
                        if (0x4U == (pkt_type & 0x1CU))
                                buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
                                                   !!(rx_stat & BIT(3));
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
                                                   !!(rx_stat & BIT(3));
                }
                buff->is_cso_err = !!(rx_stat & 0x6);
                /* Checksum offload workaround for small packets */
                if (unlikely(rxd_wb->pkt_len <= 60)) {
                        buff->is_ip_cso = 0U;
                        buff->is_cso_err = 0U;
                }

                if (self->aq_nic_cfg->is_vlan_rx_strip &&
                    ((pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN) ||
                     (pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE))) {
                        buff->is_vlan = 1;
                        buff->vlan_rx_tag = le16_to_cpu(rxd_wb->vlan);
                }

                if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
                        /* MAC error or DMA error */
                        buff->is_error = 1U;
                }
                if (self->aq_nic_cfg->is_rss) {
                        /* last 4 bytes */
                        u16 rss_type = rxd_wb->type & 0xFU;

                        if (rss_type && rss_type < 0x8U) {
                                buff->is_hash_l4 = (rss_type == 0x4 ||
                                rss_type == 0x5);
                                buff->rss_hash = rxd_wb->rss_hash;
                        }
                }

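                /* On EOP the fragment length is the total pkt_len modulo
                 * AQ_CFG_RX_FRAME_MAX (zero meaning a completely full
                 * buffer); otherwise this buffer is full and the packet
                 * continues at buff->next: LRO chains carry an explicit
                 * next_desc_ptr, jumbo frames just use the following
                 * descriptor.
                 */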
                if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
                        buff->len = rxd_wb->pkt_len %
                                AQ_CFG_RX_FRAME_MAX;
                        buff->len = buff->len ?
                                buff->len : AQ_CFG_RX_FRAME_MAX;
                        buff->next = 0U;
                        buff->is_eop = 1U;
                } else {
                        buff->len =
                                rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
                                AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;

                        if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
                                rxd_wb->status) {
                                /* LRO */
                                buff->next = rxd_wb->next_desc_ptr;
                                ++ring->stats.rx.lro_packets;
                        } else {
                                /* jumbo */
                                buff->next =
                                        aq_ring_next_dx(ring,
                                                        ring->hw_head);
                                ++ring->stats.rx.jumbo_packets;
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
        hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

        atomic_inc(&self->dpc);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
        *mask = hw_atl_itr_irq_statuslsw_get(self);
        return aq_hw_err_from_flags(self);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
                                          unsigned int packet_filter)
{
        unsigned int i = 0U;
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;

        hw_atl_rpfl2promiscuous_mode_en_set(self,
                                            IS_FILTER_ENABLED(IFF_PROMISC));

        hw_atl_rpf_vlan_prom_mode_en_set(self,
                                     IS_FILTER_ENABLED(IFF_PROMISC) ||
                                     cfg->is_vlan_force_promisc);

        hw_atl_rpfl2multicast_flr_en_set(self,
                                         IS_FILTER_ENABLED(IFF_ALLMULTI) &&
                                         IS_FILTER_ENABLED(IFF_MULTICAST), 0);

        hw_atl_rpfl2_accept_all_mc_packets_set(self,
                                              IS_FILTER_ENABLED(IFF_ALLMULTI) &&
                                              IS_FILTER_ENABLED(IFF_MULTICAST));

        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));


        for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (cfg->is_mc_list_enabled &&
                                            (i <= cfg->mc_list_count)) ?
                                           1U : 0U, i);

        return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED

static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
                                           [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
{
        int err = 0;

        if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
                err = -EBADRQC;
                goto err_exit;
        }
        for (self->aq_nic_cfg->mc_list_count = 0U;
                        self->aq_nic_cfg->mc_list_count < count;
                        ++self->aq_nic_cfg->mc_list_count) {
                u32 i = self->aq_nic_cfg->mc_list_count;
                u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
                u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
                                        (ar_mac[i][4] << 8) | ar_mac[i][5];

                hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addresslsw_set(self,
                                                        l, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addressmsw_set(self,
                                                        h, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (self->aq_nic_cfg->is_mc_list_enabled),
                                           HW_ATL_B0_MAC_MIN + i);
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
        unsigned int i = 0U;
        u32 itr_tx = 2U;
        u32 itr_rx = 2U;

        switch (self->aq_nic_cfg->itr) {
        case  AQ_CFG_INTERRUPT_MODERATION_ON:
        case  AQ_CFG_INTERRUPT_MODERATION_AUTO:
                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
                hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
                hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

                if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
                        /* HW timers are in 2us units */
                        int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
                        int tx_min_timer = tx_max_timer / 2;

                        int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
                        int rx_min_timer = rx_max_timer / 2;

                        tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
                        tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
                        rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
                        rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);

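                        /* Note all four bounds use min(): the *_min_timer
                         * values are capped at HW_ATL_INTR_MODER_MIN, not
                         * raised to it.
                         */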
                        itr_tx |= tx_min_timer << 0x8U;
                        itr_tx |= tx_max_timer << 0x10U;
                        itr_rx |= rx_min_timer << 0x8U;
                        itr_rx |= rx_max_timer << 0x10U;
                } else {
                        static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
                                {0xfU, 0xffU}, /* 10Gbit */
                                {0xfU, 0x1ffU}, /* 5Gbit */
                                {0xfU, 0x1ffU}, /* 5Gbit 5GS */
                                {0xfU, 0x1ffU}, /* 2.5Gbit */
                                {0xfU, 0x1ffU}, /* 1Gbit */
                                {0xfU, 0x1ffU}, /* 100Mbit */
                        };

                        static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
                                {0x6U, 0x38U},/* 10Gbit */
                                {0xCU, 0x70U},/* 5Gbit */
                                {0xCU, 0x70U},/* 5Gbit 5GS */
                                {0x18U, 0xE0U},/* 2.5Gbit */
                                {0x30U, 0x80U},/* 1Gbit */
                                {0x4U, 0x50U},/* 100Mbit */
                        };

                        unsigned int speed_index =
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);

                        /* Update user visible ITR settings */
                        self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
                                                        [speed_index][1] * 2;
                        self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
                                                        [speed_index][1] * 2;

                        itr_tx |= hw_atl_b0_timers_table_tx_
                                                [speed_index][0] << 0x8U;
                        itr_tx |= hw_atl_b0_timers_table_tx_
                                                [speed_index][1] << 0x10U;

                        itr_rx |= hw_atl_b0_timers_table_rx_
                                                [speed_index][0] << 0x8U;
                        itr_rx |= hw_atl_b0_timers_table_rx_
                                                [speed_index][1] << 0x10U;
                }
                break;
        case AQ_CFG_INTERRUPT_MODERATION_OFF:
                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
                hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
                hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
                itr_tx = 0U;
                itr_rx = 0U;
                break;
        }

        for (i = HW_ATL_B0_RINGS_MAX; i--;) {
                hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
                hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
        int err;
        u32 val;

        hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);

        /* Invalidate Descriptor Cache to prevent writing to the cached
         * descriptors and to the data pointer of those descriptors
         */
        hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);

        err = aq_hw_err_from_flags(self);

        if (err)
                goto err_exit;

        readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
                                  self, val, val == 1, 1000U, 10000U);

err_exit:
        return err;
}

static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
{
        *tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
{
        *tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self);
        return aq_hw_err_from_flags(self);
}

#define get_ptp_ts_val_u64(self, indx) \
        ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))

static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp)
{
        u64 ns;

        hw_atl_pcs_ptp_clock_read_enable(self, 1);
        hw_atl_pcs_ptp_clock_read_enable(self, 0);
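        /* Latched clock words 0/1 are the low/high 16 bits of the seconds
         * counter, words 3/4 the low/high 16 bits of the nanoseconds
         * counter; combine them as sec * NSEC_PER_SEC + nsec.
         */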
        ns = (get_ptp_ts_val_u64(self, 0) +
              (get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC +
             (get_ptp_ts_val_u64(self, 3) +
              (get_ptp_ts_val_u64(self, 4) << 16));

        *stamp = ns + self->ptp_clk_offset;
}

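/* Split the ppb-adjusted tick period of a counter running at @freq into an
 * integer nanosecond part and a 2^32-denominator fraction: base_ns is the
 * adjusted period in ns scaled up by NSEC_PER_SEC for precision, *ns its
 * integer part and *fns the remainder expressed in FRAC_PER_NS units.
 */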
static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
{
        /* For accuracy, the value is computed scaled by NSEC_PER_SEC */
        s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC);
        u64 nsi_frac = 0;
        u64 nsi;

        base_ns = div64_s64(base_ns, freq);
        nsi = div64_u64(base_ns, NSEC_PER_SEC);

        if (base_ns != nsi * NSEC_PER_SEC) {
                s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
                                        base_ns - nsi * NSEC_PER_SEC);
                nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor);
        }

        *ns = (u32)nsi;
        *fns = (u32)nsi_frac;
}

static void
hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
                             u64 phyfreq, u64 macfreq)
{
        s64 adj_fns_val;
        s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
                                        FRAC_PER_NS * ptp_adj_freq->ns_phy);
        s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
                                        FRAC_PER_NS * ptp_adj_freq->ns_mac);
        s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
        s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
        /* MAC MCP counter freq is macfreq / 4 */
        s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
                                   4 * FRAC_PER_NS;

        diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
                                         AQ_HW_MAC_COUNTER_HZ);
        adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS *
                       ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;

        ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, FRAC_PER_NS);
        ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
                                    FRAC_PER_NS;
}

static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
{
        self->ptp_clk_offset += delta;

        return 0;
}

static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
{
        s64 delta = time - (self->ptp_clk_offset + ts);

        return hw_atl_b0_adj_sys_clock(self, delta);
}

static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
{
        *time = self->ptp_clk_offset + ts;
        return 0;
}

static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
{
        struct hw_fw_request_iface fwreq;
        size_t size;

        memset(&fwreq, 0, sizeof(fwreq));

        fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ;
        hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb,
                                 &fwreq.ptp_adj_freq.ns_mac,
                                 &fwreq.ptp_adj_freq.fns_mac);
        hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb,
                                 &fwreq.ptp_adj_freq.ns_phy,
                                 &fwreq.ptp_adj_freq.fns_phy);
        hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq,
                                     AQ_HW_PHY_COUNTER_HZ,
                                     AQ_HW_MAC_COUNTER_HZ);

        size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq);
        return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
}

static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
                                u64 start, u32 period)
{
        struct hw_fw_request_iface fwreq;
        size_t size;

        memset(&fwreq, 0, sizeof(fwreq));

        fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL;
        fwreq.ptp_gpio_ctrl.index = index;
        fwreq.ptp_gpio_ctrl.period = period;
        /* Apply time offset */
        fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset;

        size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
        return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
}

static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index,
                                       u32 enable)
{
        /* Enable/disable Sync1588 GPIO Timestamping */
        aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0);

        return 0;
}

static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts)
{
        u64 sec_l;
        u64 sec_h;
        u64 nsec_l;
        u64 nsec_h;

        if (!ts)
                return -1;

        /* PTP external GPIO clock seconds count 15:0 */
        sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914);
        /* PTP external GPIO clock seconds count 31:16 */
        sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915);
        /* PTP external GPIO clock nanoseconds count 15:0 */
        nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916);
        /* PTP external GPIO clock nanoseconds count 31:16 */
        nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917);

        *ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC;

        return 0;
}

static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
                                   unsigned int len, u64 *timestamp)
{
        unsigned int offset = 14;
        struct ethhdr *eth;
        __be64 sec;
        __be32 ns;
        u8 *ptr;

        if (len <= offset || !timestamp)
                return 0;

        /* The TIMESTAMP at the end of the packet has the following format
         * (big-endian):
         *   struct {
         *     uint64_t sec;
         *     uint32_t ns;
         *     uint16_t stream_id;
         *   };
         */
        ptr = p + (len - offset);
        memcpy(&sec, ptr, sizeof(sec));
        ptr += sizeof(sec);
        memcpy(&ns, ptr, sizeof(ns));

        *timestamp = (be64_to_cpu(sec) & 0xffffffffffffllu) * NSEC_PER_SEC +
                     be32_to_cpu(ns) + self->ptp_clk_offset;

        eth = (struct ethhdr *)p;

        return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
}

static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
                                  u64 *timestamp)
{
        struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
        u64 tmp, sec, ns;

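        /* Reassemble the 48-bit seconds value scattered across the
         * writeback: 10 bits from sec_lw0, 16 from sec_lw1 and 12 + 10
         * from sec_hw, shifted to bit positions 0, 10, 26 and 38.
         */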
1273         sec = 0;
1274         tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
1275         sec += tmp;
1276         tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
1277         sec += tmp;
1278         tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
1279         sec += tmp;
1280         tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
1281         sec += tmp;
1282         ns = sec * NSEC_PER_SEC + hwts_wb->ns;
1283         if (timestamp)
1284                 *timestamp = ns + self->ptp_clk_offset;
1285         return 0;
1286 }
1287
1288 static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
1289                                     struct aq_rx_filter_l3l4 *data)
1290 {
1291         u8 location = data->location;
1292
1293         if (!data->is_ipv6) {
1294                 hw_atl_rpfl3l4_cmd_clear(self, location);
1295                 hw_atl_rpf_l4_spd_set(self, 0U, location);
1296                 hw_atl_rpf_l4_dpd_set(self, 0U, location);
1297                 hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
1298                 hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
1299         } else {
1300                 int i;
1301
1302                 for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
1303                         hw_atl_rpfl3l4_cmd_clear(self, location + i);
1304                         hw_atl_rpf_l4_spd_set(self, 0U, location + i);
1305                         hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
1306                 }
1307                 hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
1308                 hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
1309         }
1310
1311         return aq_hw_err_from_flags(self);
1312 }
1313
1314 static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
1315                                   struct aq_rx_filter_l3l4 *data)
1316 {
1317         u8 location = data->location;
1318
1319         hw_atl_b0_hw_fl3l4_clear(self, data);
1320
1321         if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 |
1322                          HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) {
1323                 if (!data->is_ipv6) {
1324                         hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
1325                                                           location,
1326                                                           data->ip_dst[0]);
1327                         hw_atl_rpfl3l4_ipv4_src_addr_set(self,
1328                                                          location,
1329                                                          data->ip_src[0]);
1330                 } else {
1331                         hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
1332                                                           location,
1333                                                           data->ip_dst);
1334                         hw_atl_rpfl3l4_ipv6_src_addr_set(self,
1335                                                          location,
1336                                                          data->ip_src);
1337                 }
1338         }
1339
1340         if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
1341                          HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) {
1342                 hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
1343                 hw_atl_rpf_l4_spd_set(self, data->p_src, location);
1344         }
1345
1346         hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
1347
1348         return aq_hw_err_from_flags(self);
1349 }
1350
static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
				struct aq_rx_filter_l2 *data)
{
	hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
	hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
	hw_atl_rpf_etht_user_priority_en_set(self,
					     !!data->user_priority_en,
					     data->location);
	if (data->user_priority_en)
		hw_atl_rpf_etht_user_priority_set(self,
						  data->user_priority,
						  data->location);

	if (data->queue < 0) {
		hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
		hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
	} else {
		hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
		hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
		hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
	}

	return aq_hw_err_from_flags(self);
}

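/* Disable and zero out the EtherType (L2) Rx filter at @data->location. */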
static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
				  struct aq_rx_filter_l2 *data)
{
	hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
	hw_atl_rpf_etht_flr_set(self, 0U, data->location);
	hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);

	return aq_hw_err_from_flags(self);
}

/**
 * hw_atl_b0_hw_vlan_set() - set the VLAN filter table
 * @self: hardware context
 * @aq_vlans: VLAN filter configuration
 *
 * Configures the VLAN filter table to accept traffic for the given VLAN IDs
 * and, optionally, to steer it to a specific Rx queue. Call this while VLAN
 * promiscuous mode is enabled so that traffic is not lost while the table is
 * being rewritten.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
				 struct aq_rx_filter_vlan *aq_vlans)
{
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
		hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
		hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
		if (aq_vlans[i].enable) {
			hw_atl_rpf_vlan_id_flr_set(self,
						   aq_vlans[i].vlan_id,
						   i);
			hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
			hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
			if (aq_vlans[i].queue != 0xFF) {
				hw_atl_rpf_vlan_rxq_flr_set(self,
							    aq_vlans[i].queue,
							    i);
				hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
			}
		}
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
	/* Fall back to VLAN promiscuous mode when the VLAN filter is
	 * disabled, so that tagged traffic is not dropped.
	 */
	hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);

	return aq_hw_err_from_flags(self);
}

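/* Enable or disable one of the hardware loopback modes. DMA_SYS loops Tx
 * back to Rx at the system side of the DMA block, PKT_SYS at the packet
 * offload engine (TPO to RPF), and DMA_NET at the network-facing side of
 * the DMA block. DMA_NET additionally forces promiscuous mode and disables
 * Tx clock gating so that looped frames are actually received. From user
 * space this is driven via ethtool private flags, e.g. (flag name assumed
 * from the driver's ethtool code):
 *   ethtool --set-priv-flags eth0 DMASystemLoopback on
 */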
static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
{
	switch (mode) {
	case AQ_HW_LOOPBACK_DMA_SYS:
		hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable);
		hw_atl_rpb_dma_sys_lbk_set(self, enable);
		break;
	case AQ_HW_LOOPBACK_PKT_SYS:
		hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable);
		hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable);
		break;
	case AQ_HW_LOOPBACK_DMA_NET:
		hw_atl_rpf_vlan_prom_mode_en_set(self, enable);
		hw_atl_rpfl2promiscuous_mode_en_set(self, enable);
		hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable);
		hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable);
		hw_atl_rpb_dma_net_lbk_set(self, enable);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

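/* Hardware operations table for the Atlantic B0 generation, consumed by
 * the generic aq_nic/aq_hw layer.
 */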
const struct aq_hw_ops hw_atl_ops_b0 = {
	.hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
	.hw_init              = hw_atl_b0_hw_init,
	.hw_reset             = hw_atl_b0_hw_reset,
	.hw_start             = hw_atl_b0_hw_start,
	.hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop              = hw_atl_b0_hw_stop,

	.hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,

	.hw_ring_rx_receive      = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill         = hw_atl_b0_hw_ring_rx_fill,

	.hw_irq_enable           = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable          = hw_atl_b0_hw_irq_disable,
	.hw_irq_read             = hw_atl_b0_hw_irq_read,

	.hw_ring_rx_init             = hw_atl_b0_hw_ring_rx_init,
	.hw_ring_tx_init             = hw_atl_b0_hw_ring_tx_init,
	.hw_packet_filter_set        = hw_atl_b0_hw_packet_filter_set,
	.hw_filter_l2_set            = hw_atl_b0_hw_fl2_set,
	.hw_filter_l2_clear          = hw_atl_b0_hw_fl2_clear,
	.hw_filter_l3l4_set          = hw_atl_b0_hw_fl3l4_set,
	.hw_filter_vlan_set          = hw_atl_b0_hw_vlan_set,
	.hw_filter_vlan_ctrl         = hw_atl_b0_hw_vlan_ctrl,
	.hw_multicast_list_set       = hw_atl_b0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
	.hw_rss_set                  = hw_atl_b0_hw_rss_set,
	.hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version           = hw_atl_utils_get_fw_version,

	.hw_tx_tc_mode_get       = hw_atl_b0_tx_tc_mode_get,
	.hw_rx_tc_mode_get       = hw_atl_b0_rx_tc_mode_get,

	.hw_ring_hwts_rx_fill        = hw_atl_b0_hw_ring_hwts_rx_fill,
	.hw_ring_hwts_rx_receive     = hw_atl_b0_hw_ring_hwts_rx_receive,

	.hw_get_ptp_ts           = hw_atl_b0_get_ptp_ts,
	.hw_adj_sys_clock        = hw_atl_b0_adj_sys_clock,
	.hw_set_sys_clock        = hw_atl_b0_set_sys_clock,
	.hw_ts_to_sys_clock      = hw_atl_b0_ts_to_sys_clock,
	.hw_adj_clock_freq       = hw_atl_b0_adj_clock_freq,
	.hw_gpio_pulse           = hw_atl_b0_gpio_pulse,
	.hw_extts_gpio_enable    = hw_atl_b0_extts_gpio_enable,
	.hw_get_sync_ts          = hw_atl_b0_get_sync_ts,
	.rx_extract_ts           = hw_atl_b0_rx_extract_ts,
	.extract_hwts            = hw_atl_b0_extract_hwts,
	.hw_set_offload          = hw_atl_b0_hw_offload_set,
	.hw_set_loopback         = hw_atl_b0_set_loopback,
	.hw_set_fc               = hw_atl_b0_set_fc,
};