// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"

#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
        .is_64_dma = true,                \
        .msix_irqs = 8U,                  \
        .irq_mask = ~0U,                  \
        .vecs = HW_ATL_B0_RSS_MAX,        \
        .tcs = HW_ATL_B0_TC_MAX,          \
        .rxd_alignment = 1U,              \
        .rxd_size = HW_ATL_B0_RXD_SIZE,   \
        .rxds_max = HW_ATL_B0_MAX_RXD,    \
        .rxds_min = HW_ATL_B0_MIN_RXD,    \
        .txd_alignment = 1U,              \
        .txd_size = HW_ATL_B0_TXD_SIZE,   \
        .txds_max = HW_ATL_B0_MAX_TXD,    \
        .txds_min = HW_ATL_B0_MIN_TXD,    \
        .txhwb_alignment = 4096U,         \
        .tx_rings = HW_ATL_B0_TX_RINGS,   \
        .rx_rings = HW_ATL_B0_RX_RINGS,   \
        .hw_features = NETIF_F_HW_CSUM |  \
                        NETIF_F_RXCSUM |  \
                        NETIF_F_RXHASH |  \
                        NETIF_F_SG |      \
                        NETIF_F_TSO |     \
                        NETIF_F_LRO |     \
                        NETIF_F_NTUPLE |  \
                        NETIF_F_HW_VLAN_CTAG_FILTER, \
        .hw_priv_flags = IFF_UNICAST_FLT, \
        .flow_control = true,             \
        .mtu = HW_ATL_B0_MTU_JUMBO,       \
        .mac_regs_count = 88,             \
        .hw_alive_check_addr = 0x10U

const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
        int err = 0;

        err = hw_atl_utils_soft_reset(self);
        if (err)
                return err;

        self->aq_fw_ops->set_state(self, MPI_RESET);

        err = aq_hw_err_from_flags(self);

        return err;
}

static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
        return 0;
}

static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
        u32 tc = 0U;
        u32 buff_size = 0U;
        unsigned int i_priority = 0U;

        /* TPS Descriptor rate init */
        hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
        hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

        /* TPS VM init */
        hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

        /* TPS TC credits init */
        hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
        hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

        hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
        hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
        hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
        hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

        /* Tx buf size */
        buff_size = HW_ATL_B0_TXBUF_MAX;

        hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
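        /* The watermarks below appear to be in 32-byte units: buff_size
         * is in KB, buff_size * (1024 / 32) converts it, and the high/low
         * thresholds then sit at ~66% and ~50% of the buffer.
         */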
        hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024 / 32U) * 66U) /
                                                   100U, tc);
        hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024 / 32U) * 50U) /
                                                   100U, tc);

        /* QoS Rx buf size per TC */
        tc = 0;
        buff_size = HW_ATL_B0_RXBUF_MAX;

        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024U / 32U) * 66U) /
                                                   100U, tc);
        hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024U / 32U) * 50U) /
                                                   100U, tc);

        hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);

        /* QoS 802.1p priority -> TC mapping */
        for (i_priority = 8U; i_priority--;)
                hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
                                     struct aq_rss_parameters *rss_params)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int err = 0;
        unsigned int i = 0U;
        unsigned int addr = 0U;
        u32 val;

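        /* The 40-byte RSS secret key is written as ten 32-bit words,
         * highest word first, each byte-swapped before being handed to
         * the key registers; a zero key is programmed when RSS is off.
         */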
        for (i = 10, addr = 0U; i--; ++addr) {
                u32 key_data = cfg->is_rss ?
                        __swab32(rss_params->hash_secret_key[i]) : 0U;
                hw_atl_rpf_rss_key_wr_data_set(self, key_data);
                hw_atl_rpf_rss_key_addr_set(self, addr);
                hw_atl_rpf_rss_key_wr_en_set(self, 1U);
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
                                struct aq_rss_parameters *rss_params)
{
        u8 *indirection_table = rss_params->indirection_table;
        u32 i = 0U;
        u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
        int err = 0;
        u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
                   HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
        u32 val;

        memset(bitary, 0, sizeof(bitary));

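        /* Pack the redirection table for the hardware: each entry is a
         * 3-bit queue index, laid down back to back in bitary[] and then
         * written out through the 16-bit redirection-table registers.
         */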
        for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
                (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
                        ((indirection_table[i] % num_rss_queues) <<
                        ((i * 3U) & 0xFU));
        }

        for (i = ARRAY_SIZE(bitary); i--;) {
                hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
                hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
                hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
                                    struct aq_nic_cfg_s *aq_nic_cfg)
{
        unsigned int i;

        /* TX checksum offloads */
        hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

        /* RX checksum offloads */
        hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
                                                 NETIF_F_RXCSUM));
        hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
                                              NETIF_F_RXCSUM));

        /* LSO offloads */
        hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

        /* LRO offloads */
        {
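                /* Pick the largest 2-bit descriptor-limit encoding
                 * (apparently 2^(val + 1) descriptors per LRO session)
                 * that HW_ATL_B0_LRO_RXD_MAX allows.
                 */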
                unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
                        ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
                        ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

                for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
                        hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);

                hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
                hw_atl_rpo_lro_inactive_interval_set(self, 0);
                /* The LRO time base divider is 5 us (0x61a); multiplied
                 * by 50 (0x32) this gives the default maximum coalescing
                 * interval of 250 us.
                 */
                hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);

                hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

                hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

                hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);

                hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

                hw_atl_rpo_lro_pkt_lim_set(self, 1U);

                hw_atl_rpo_lro_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
                hw_atl_itr_rsc_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);

                hw_atl_itr_rsc_delay_set(self, 1U);
        }
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
        /* Tx TC/Queue number config */
        hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);

        hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

        /* Tx interrupts */
        hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
                        0x00010000U : 0x00000000U);
        hw_atl_tdm_tx_dca_en_set(self, 0U);
        hw_atl_tdm_tx_dca_mode_set(self, 0U);

        hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int i;

        /* Rx TC/RSS number config */
        hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

        /* Rx flow control */
        hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

        /* RSS Ring selection */
        hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
                                        0xB3333333U : 0x00000000U);

        /* Multicast filters */
        for (i = HW_ATL_B0_MAC_MAX; i--;) {
                hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
                hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
        }

        hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
        hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

        /* VLAN filters */
        hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
        hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

        hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

        /* Always accept untagged packets */
        hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
        hw_atl_rpf_vlan_untagged_act_set(self, 1U);

        /* Rx Interrupts */
        hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00005040U,
                        IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);

        hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
        hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

        hw_atl_rdm_rx_dca_en_set(self, 0U);
        hw_atl_rdm_rx_dca_mode_set(self, 0U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
        int err = 0;
        unsigned int h = 0U;
        unsigned int l = 0U;

        if (!mac_addr) {
                err = -EINVAL;
                goto err_exit;
        }
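        /* Split the address across two filter registers: the two
         * high-order bytes form the MSW, the remaining four the LSW.
         */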
        h = (mac_addr[0] << 8) | (mac_addr[1]);
        l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
                (mac_addr[4] << 8) | mac_addr[5];

        hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
        hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
        hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
        hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
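        /* Global interrupt control register values, indexed by IRQ type
         * and by whether a single vector or multiple vectors are in use.
         */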
        static u32 aq_hw_atl_igcr_table_[4][2] = {
                [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
                [AQ_HW_IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
                [AQ_HW_IRQ_MSI]     = { 0x20000021U, 0x20000025U },
                [AQ_HW_IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
        };

        int err = 0;
        u32 val;

        struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;

        hw_atl_b0_hw_init_tx_path(self);
        hw_atl_b0_hw_init_rx_path(self);

        hw_atl_b0_hw_mac_addr_set(self, mac_addr);

        self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
        self->aq_fw_ops->set_state(self, MPI_INIT);

        hw_atl_b0_hw_qos_set(self);
        hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

        /* Force limit MRRS on RDM/TDM to 2K */
        val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
        aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
                        (val & ~0x707) | 0x404);

        /* TX DMA total request limit. B0 hardware is not capable of
         * handling more than (8K-MRRS) of incoming DMA data.
         * The value 24 is in 256-byte units.
         */
        aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

        /* Reset link status and read out initial hardware counters */
        self->aq_link_status.mbps = 0;
        self->aq_fw_ops->update_stats(self);

        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;

        /* Interrupts */
        hw_atl_reg_irq_glb_ctl_set(self,
                                   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
                                                 [(aq_nic_cfg->vecs > 1U) ?
                                                 1 : 0]);

        hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

        /* Interrupts */
        hw_atl_reg_gen_irq_map_set(self,
                                   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
                                   ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);

        /* Enable link interrupt */
        if (aq_nic_cfg->link_irq_vec)
                hw_atl_reg_gen_irq_map_set(self, BIT(7) |
                                           aq_nic_cfg->link_irq_vec, 3U);

        hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
        return err;
}

static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
        hw_atl_tpb_tx_buff_en_set(self, 1);
        hw_atl_rpb_rx_buff_en_set(self, 1);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
        return 0;
}

static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int frags)
{
        struct aq_ring_buff_s *buff = NULL;
        struct hw_atl_txd_s *txd = NULL;
        unsigned int buff_pa_len = 0U;
        unsigned int pkt_len = 0U;
        unsigned int frag_count = 0U;
        bool is_gso = false;

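        /* Each packet maps to one or more descriptors: an optional
         * context descriptor (TXC) carrying the LSO parameters, then one
         * data descriptor (TXD) per fragment; the final fragment is
         * marked EOP and requests a write-back.
         */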
        buff = &ring->buff_ring[ring->sw_tail];
        pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

        for (frag_count = 0; frag_count < frags; frag_count++) {
                txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
                                                HW_ATL_B0_TXD_SIZE];
                txd->ctl = 0;
                txd->ctl2 = 0;
                txd->buf_addr = 0;

                buff = &ring->buff_ring[ring->sw_tail];

                if (buff->is_txc) {
                        txd->ctl |= (buff->len_l3 << 31) |
                                (buff->len_l2 << 24) |
                                HW_ATL_B0_TXD_CTL_CMD_TCP |
                                HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl2 |= (buff->mss << 16) |
                                (buff->len_l4 << 8) |
                                (buff->len_l3 >> 1);

                        pkt_len -= (buff->len_l4 +
                                    buff->len_l3 +
                                    buff->len_l2);
                        is_gso = true;

                        if (buff->is_ipv6)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
                } else {
                        buff_pa_len = buff->len;

                        txd->buf_addr = buff->pa;
                        txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
                                                ((u32)buff_pa_len << 4));
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
                        /* PAY_LEN */
                        txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

                        if (is_gso) {
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
                                txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
                        }

                        /* Tx checksum offloads */
                        if (buff->is_ip_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

                        if (buff->is_udp_cso || buff->is_tcp_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
                                is_gso = false;
                        }
                }

                ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
        }

        hw_atl_b0_hw_tx_ring_tail_update(self, ring);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
                                                  aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addressmswset(self,
                                                  dma_desc_addr_msw, aq_ring->idx);

        hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_rdm_rx_desc_data_buff_size_set(self,
                                              AQ_CFG_RX_FRAME_MAX / 1024U,
                                              aq_ring->idx);

        hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
        hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

        /* Rx ring set mode */

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

        hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
                                                  aq_ring->idx);

        hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
                                                  aq_ring->idx);

        hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

        /* Set Tx threshold */
        hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

        hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int sw_tail_old)
{
        for (; sw_tail_old != ring->sw_tail;
                sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
                struct hw_atl_rxd_s *rxd =
                        (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
                                                        HW_ATL_B0_RXD_SIZE];

                struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

                rxd->buf_addr = buff->pa;
                rxd->hdr_addr = 0U;
        }

        hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        int err = 0;
        unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);

        if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
                err = -ENXIO;
                goto err_exit;
        }
        ring->hw_head = hw_head_;
        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                                        struct aq_ring_s *ring)
{
        for (; ring->hw_head != ring->sw_tail;
                ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
                struct aq_ring_buff_s *buff = NULL;
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;
                u8 rx_stat = 0U;

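                /* The hardware writes completion status back into the
                 * descriptor: bit 0 is the "done" bit, bits 2..5 carry
                 * error/checksum flags, and the type field encodes the
                 * packet type, RSS hash type and checksum-offload state.
                 */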
                if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
                        break;
                }

                buff = &ring->buff_ring[ring->hw_head];

                rx_stat = (0x0000003CU & rxd_wb->status) >> 2;

                is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;

                pkt_type = 0xFFU & (rxd_wb->type >> 4);

                if (is_rx_check_sum_enabled & BIT(0) &&
                    (0x0U == (pkt_type & 0x3U)))
                        buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;

                if (is_rx_check_sum_enabled & BIT(1)) {
                        if (0x4U == (pkt_type & 0x1CU))
                                buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
                                                   !!(rx_stat & BIT(3));
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
                                                   !!(rx_stat & BIT(3));
                }
                buff->is_cso_err = !!(rx_stat & 0x6);
                /* Checksum offload workaround for small packets */
                if (unlikely(rxd_wb->pkt_len <= 60)) {
                        buff->is_ip_cso = 0U;
                        buff->is_cso_err = 0U;
                }

                if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
                        /* MAC error or DMA error */
                        buff->is_error = 1U;
                }
                if (self->aq_nic_cfg->is_rss) {
                        /* last 4 bytes */
                        u16 rss_type = rxd_wb->type & 0xFU;

                        if (rss_type && rss_type < 0x8U) {
                                buff->is_hash_l4 = (rss_type == 0x4 ||
                                                    rss_type == 0x5);
                                buff->rss_hash = rxd_wb->rss_hash;
                        }
                }

                if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
                        buff->len = rxd_wb->pkt_len %
                                AQ_CFG_RX_FRAME_MAX;
                        buff->len = buff->len ?
                                buff->len : AQ_CFG_RX_FRAME_MAX;
                        buff->next = 0U;
                        buff->is_eop = 1U;
                } else {
                        buff->len =
                                rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
                                AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;

                        if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
                                rxd_wb->status) {
                                /* LRO */
                                buff->next = rxd_wb->next_desc_ptr;
                                ++ring->stats.rx.lro_packets;
                        } else {
                                /* jumbo */
                                buff->next =
                                        aq_ring_next_dx(ring,
                                                        ring->hw_head);
                                ++ring->stats.rx.jumbo_packets;
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
        hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

        atomic_inc(&self->dpc);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
        *mask = hw_atl_itr_irq_statuslsw_get(self);
        return aq_hw_err_from_flags(self);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
                                          unsigned int packet_filter)
{
        unsigned int i = 0U;
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;

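        /* Mirror the requested net_device flags into the hardware L2
         * filters: promiscuous, all-multicast and broadcast controls,
         * plus the per-slot unicast filters holding the multicast list.
         */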
        hw_atl_rpfl2promiscuous_mode_en_set(self,
                                            IS_FILTER_ENABLED(IFF_PROMISC));

        hw_atl_rpf_vlan_prom_mode_en_set(self,
                                     IS_FILTER_ENABLED(IFF_PROMISC) ||
                                     cfg->is_vlan_force_promisc);

        hw_atl_rpfl2multicast_flr_en_set(self,
                                         IS_FILTER_ENABLED(IFF_ALLMULTI), 0);

        hw_atl_rpfl2_accept_all_mc_packets_set(self,
                                               IS_FILTER_ENABLED(IFF_ALLMULTI));

        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

        cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);

        for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (cfg->is_mc_list_enabled &&
                                            (i <= cfg->mc_list_count)) ?
                                           1U : 0U, i);

        return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED

static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
                                           [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
{
        int err = 0;

        if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
                err = -EBADRQC;
                goto err_exit;
        }
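        /* Multicast addresses occupy the unicast filter slots starting
         * at HW_ATL_B0_MAC_MIN; each filter is disabled while its
         * address registers are rewritten, then re-enabled.
         */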
        for (self->aq_nic_cfg->mc_list_count = 0U;
                        self->aq_nic_cfg->mc_list_count < count;
                        ++self->aq_nic_cfg->mc_list_count) {
                u32 i = self->aq_nic_cfg->mc_list_count;
                u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
                u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
                                        (ar_mac[i][4] << 8) | ar_mac[i][5];

                hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addresslsw_set(self,
                                                        l, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addressmsw_set(self,
                                                        h, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (self->aq_nic_cfg->is_mc_list_enabled),
                                           HW_ATL_B0_MAC_MIN + i);
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
        unsigned int i = 0U;
        u32 itr_tx = 2U;
        u32 itr_rx = 2U;

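        /* The per-ring moderation control value packs the minimum timer
         * into bits 8..15 and the maximum timer into bits 16..23; the
         * preset low byte (2) is presumably a mode/enable field.
         */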
        switch (self->aq_nic_cfg->itr) {
        case AQ_CFG_INTERRUPT_MODERATION_ON:
        case AQ_CFG_INTERRUPT_MODERATION_AUTO:
                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
                hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
                hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

                if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
                        /* HW timers are in 2us units */
                        int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
                        int tx_min_timer = tx_max_timer / 2;

                        int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
                        int rx_min_timer = rx_max_timer / 2;

                        tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
                        tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
                        rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
                        rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);

                        itr_tx |= tx_min_timer << 0x8U;
                        itr_tx |= tx_max_timer << 0x10U;
                        itr_rx |= rx_min_timer << 0x8U;
                        itr_rx |= rx_max_timer << 0x10U;
                } else {
                        static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
                                {0xfU, 0xffU},  /* 10Gbit */
                                {0xfU, 0x1ffU}, /* 5Gbit */
                                {0xfU, 0x1ffU}, /* 5Gbit 5GS */
                                {0xfU, 0x1ffU}, /* 2.5Gbit */
                                {0xfU, 0x1ffU}, /* 1Gbit */
                                {0xfU, 0x1ffU}, /* 100Mbit */
                        };

                        static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
                                {0x6U, 0x38U},  /* 10Gbit */
                                {0xCU, 0x70U},  /* 5Gbit */
                                {0xCU, 0x70U},  /* 5Gbit 5GS */
                                {0x18U, 0xE0U}, /* 2.5Gbit */
                                {0x30U, 0x80U}, /* 1Gbit */
                                {0x4U, 0x50U},  /* 100Mbit */
                        };

                        unsigned int speed_index =
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);

                        /* Update user visible ITR settings */
                        self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
                                                        [speed_index][1] * 2;
                        self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
                                                        [speed_index][1] * 2;

                        itr_tx |= hw_atl_b0_timers_table_tx_
                                                [speed_index][0] << 0x8U;
                        itr_tx |= hw_atl_b0_timers_table_tx_
                                                [speed_index][1] << 0x10U;

                        itr_rx |= hw_atl_b0_timers_table_rx_
                                                [speed_index][0] << 0x8U;
                        itr_rx |= hw_atl_b0_timers_table_rx_
                                                [speed_index][1] << 0x10U;
                }
                break;
        case AQ_CFG_INTERRUPT_MODERATION_OFF:
                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
                hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
                hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
                itr_tx = 0U;
                itr_rx = 0U;
                break;
        }

        for (i = HW_ATL_B0_RINGS_MAX; i--;) {
                hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
                hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
        hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);

        /* Invalidate Descriptor Cache to prevent writing to the cached
         * descriptors and to the data pointer of those descriptors
         */
        hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
                                    struct aq_rx_filter_l3l4 *data)
{
        u8 location = data->location;

        if (!data->is_ipv6) {
                hw_atl_rpfl3l4_cmd_clear(self, location);
                hw_atl_rpf_l4_spd_set(self, 0U, location);
                hw_atl_rpf_l4_dpd_set(self, 0U, location);
                hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
                hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
        } else {
                int i;

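                /* An IPv6 address spans HW_ATL_RX_CNT_REG_ADDR_IPV6
                 * consecutive filter locations, so clear each of them.
                 */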
                for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
                        hw_atl_rpfl3l4_cmd_clear(self, location + i);
                        hw_atl_rpf_l4_spd_set(self, 0U, location + i);
                        hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
                }
                hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
                hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
                                  struct aq_rx_filter_l3l4 *data)
{
        u8 location = data->location;

        hw_atl_b0_hw_fl3l4_clear(self, data);

        if (data->cmd) {
                if (!data->is_ipv6) {
                        hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
                                                          location,
                                                          data->ip_dst[0]);
                        hw_atl_rpfl3l4_ipv4_src_addr_set(self,
                                                         location,
                                                         data->ip_src[0]);
                } else {
                        hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
                                                          location,
                                                          data->ip_dst);
                        hw_atl_rpfl3l4_ipv6_src_addr_set(self,
                                                         location,
                                                         data->ip_src);
                }
        }
        hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
        hw_atl_rpf_l4_spd_set(self, data->p_src, location);
        hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
                                struct aq_rx_filter_l2 *data)
{
        hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
        hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
        hw_atl_rpf_etht_user_priority_en_set(self,
                                             !!data->user_priority_en,
                                             data->location);
        if (data->user_priority_en)
                hw_atl_rpf_etht_user_priority_set(self,
                                                  data->user_priority,
                                                  data->location);

        if (data->queue < 0) {
                hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
                hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
        } else {
                hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
                hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
                hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
                                  struct aq_rx_filter_l2 *data)
{
        hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
        hw_atl_rpf_etht_flr_set(self, 0U, data->location);
        hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);

        return aq_hw_err_from_flags(self);
}

/**
 * hw_atl_b0_hw_vlan_set() - set the VLAN filter table
 * @self: hardware context
 * @aq_vlans: VLAN filter configuration
 *
 * Configure the VLAN filter table to accept (and assign a queue to)
 * traffic for the given VLAN IDs.
 * Note: use this function under VLAN promiscuous mode so as not to
 * lose traffic.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
                                 struct aq_rx_filter_vlan *aq_vlans)
{
        int i;

        for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
                hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
                hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
                if (aq_vlans[i].enable) {
                        hw_atl_rpf_vlan_id_flr_set(self,
                                                   aq_vlans[i].vlan_id,
                                                   i);
                        hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
                        hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
                        if (aq_vlans[i].queue != 0xFF) {
                                hw_atl_rpf_vlan_rxq_flr_set(self,
                                                            aq_vlans[i].queue,
                                                            i);
                                hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
        /* Enable promiscuous mode when disabling the VLAN filter */
        hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);

        return aq_hw_err_from_flags(self);
}

const struct aq_hw_ops hw_atl_ops_b0 = {
        .hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
        .hw_init              = hw_atl_b0_hw_init,
        .hw_reset             = hw_atl_b0_hw_reset,
        .hw_start             = hw_atl_b0_hw_start,
        .hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
        .hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
        .hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
        .hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
        .hw_stop              = hw_atl_b0_hw_stop,

        .hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
        .hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,

        .hw_ring_rx_receive      = hw_atl_b0_hw_ring_rx_receive,
        .hw_ring_rx_fill         = hw_atl_b0_hw_ring_rx_fill,

        .hw_irq_enable           = hw_atl_b0_hw_irq_enable,
        .hw_irq_disable          = hw_atl_b0_hw_irq_disable,
        .hw_irq_read             = hw_atl_b0_hw_irq_read,

        .hw_ring_rx_init             = hw_atl_b0_hw_ring_rx_init,
        .hw_ring_tx_init             = hw_atl_b0_hw_ring_tx_init,
        .hw_packet_filter_set        = hw_atl_b0_hw_packet_filter_set,
        .hw_filter_l2_set            = hw_atl_b0_hw_fl2_set,
        .hw_filter_l2_clear          = hw_atl_b0_hw_fl2_clear,
        .hw_filter_l3l4_set          = hw_atl_b0_hw_fl3l4_set,
        .hw_filter_vlan_set          = hw_atl_b0_hw_vlan_set,
        .hw_filter_vlan_ctrl         = hw_atl_b0_hw_vlan_ctrl,
        .hw_multicast_list_set       = hw_atl_b0_hw_multicast_list_set,
        .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
        .hw_rss_set                  = hw_atl_b0_hw_rss_set,
        .hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
        .hw_get_regs                 = hw_atl_utils_hw_get_regs,
        .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version           = hw_atl_utils_get_fw_version,
        .hw_set_offload              = hw_atl_b0_hw_offload_set,
        .hw_set_fc                   = hw_atl_b0_set_fc,
};