1 // SPDX-License-Identifier: GPL-2.0-only
3 * aQuantia Corporation Network Driver
4 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
7 /* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
10 #include "../aq_hw_utils.h"
11 #include "../aq_ring.h"
12 #include "../aq_nic.h"
13 #include "../aq_phy.h"
14 #include "hw_atl_b0.h"
15 #include "hw_atl_utils.h"
16 #include "hw_atl_llh.h"
17 #include "hw_atl_b0_internal.h"
18 #include "hw_atl_llh_internal.h"
20 #define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
24 .vecs = HW_ATL_B0_RSS_MAX, \
25 .tcs = HW_ATL_B0_TC_MAX, \
26 .rxd_alignment = 1U, \
27 .rxd_size = HW_ATL_B0_RXD_SIZE, \
28 .rxds_max = HW_ATL_B0_MAX_RXD, \
29 .rxds_min = HW_ATL_B0_MIN_RXD, \
30 .txd_alignment = 1U, \
31 .txd_size = HW_ATL_B0_TXD_SIZE, \
32 .txds_max = HW_ATL_B0_MAX_TXD, \
33 .txds_min = HW_ATL_B0_MIN_TXD, \
34 .txhwb_alignment = 4096U, \
35 .tx_rings = HW_ATL_B0_TX_RINGS, \
36 .rx_rings = HW_ATL_B0_RX_RINGS, \
37 .hw_features = NETIF_F_HW_CSUM | \
44 NETIF_F_HW_VLAN_CTAG_FILTER | \
45 NETIF_F_HW_VLAN_CTAG_RX | \
46 NETIF_F_HW_VLAN_CTAG_TX, \
47 .hw_priv_flags = IFF_UNICAST_FLT, \
48 .flow_control = true, \
49 .mtu = HW_ATL_B0_MTU_JUMBO, \
50 .mac_regs_count = 88, \
51 .hw_alive_check_addr = 0x10U
53 #define FRAC_PER_NS 0x100000000LL
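/* 0x100000000 == 2^32: the PTP adjustment math below carries fractional
 * nanoseconds as 32-bit binary fractions, i.e. in units of 1/2^32 ns
 * (so, for example, 1.5 ns would be expressed as ns = 1, fns = 0x80000000).
 */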
55 const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
56 DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
57 .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
58 .link_speed_msk = AQ_NIC_RATE_10G |
65 const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
66 DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
67 .media_type = AQ_HW_MEDIA_TYPE_TP,
68 .link_speed_msk = AQ_NIC_RATE_10G |
75 const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
76 DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
77 .media_type = AQ_HW_MEDIA_TYPE_TP,
78 .link_speed_msk = AQ_NIC_RATE_5G |
84 const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
85 DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
86 .media_type = AQ_HW_MEDIA_TYPE_TP,
87 .link_speed_msk = AQ_NIC_RATE_2GS |
92 static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
96 err = hw_atl_utils_soft_reset(self);
100 self->aq_fw_ops->set_state(self, MPI_RESET);
102 err = aq_hw_err_from_flags(self);
107 static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
109 hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
113 static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
117 unsigned int i_priority = 0U;
119 /* TPS Descriptor rate init */
120 hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
121 hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
124 hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
126 /* TPS TC credits init */
127 hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
128 hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
132 /* TX Packet Scheduler Data TC0 */
133 hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
134 hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
135 hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
136 hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
138 /* Tx buf size TC0 */
139 buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE;
141 hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
142 hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
144 (1024 / 32U) * 66U) /
146 hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
148 (1024 / 32U) * 50U) /
150 /* Init TC2 for PTP_TX */
153 hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
156 /* QoS Rx buf size per TC */
158 buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE;
160 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
161 hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
163 (1024U / 32U) * 66U) /
165 hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
167 (1024U / 32U) * 50U) /
170 hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
172 /* Init TC2 for PTP_RX */
175 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
177 /* No flow control for PTP */
178 hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
180 /* QoS 802.1p priority -> TC mapping */
181 for (i_priority = 8U; i_priority--;)
182 hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
184 return aq_hw_err_from_flags(self);
187 static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
188 struct aq_rss_parameters *rss_params)
190 struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
193 unsigned int addr = 0U;
196 for (i = 10, addr = 0U; i--; ++addr) {
197 u32 key_data = cfg->is_rss ?
198 __swab32(rss_params->hash_secret_key[i]) : 0U;
199 hw_atl_rpf_rss_key_wr_data_set(self, key_data);
200 hw_atl_rpf_rss_key_addr_set(self, addr);
201 hw_atl_rpf_rss_key_wr_en_set(self, 1U);
202 err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
209 err = aq_hw_err_from_flags(self);
215 static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
216 struct aq_rss_parameters *rss_params)
218 u8 *indirection_table = rss_params->indirection_table;
220 u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
222 u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
223 HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
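/* Each redirection entry is HW_ATL_B0_RSS_REDIRECTION_BITS (3 bits, hence
 * the "i * 3U" stride below) wide and holds a queue index; the entries are
 * packed back to back into bitary[] before being written out.
 */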
226 memset(bitary, 0, sizeof(bitary));
228 for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
229 (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
230 ((indirection_table[i] % num_rss_queues) <<
234 for (i = ARRAY_SIZE(bitary); i--;) {
235 hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
236 hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
237 hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
238 err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
245 err = aq_hw_err_from_flags(self);
251 static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
252 struct aq_nic_cfg_s *aq_nic_cfg)
256 /* TX checksum offloads */
257 hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
258 hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
260 /* RX checksum offloads */
261 hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
263 hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
267 hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
269 /* Outer VLAN tag offload */
270 hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);
274 unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
275 ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
276 ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
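/* val appears to encode the per-session LRO descriptor limit as a power of
 * two, bounded by HW_ATL_B0_LRO_RXD_MAX: 0x3 -> 8 descriptors, 0x2 -> 4,
 * 0x1 -> 2.
 */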
278 for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
279 hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
281 hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
282 hw_atl_rpo_lro_inactive_interval_set(self, 0);
283 /* The LRO timebase divider is 5 us (0x61a),
284 * which is multiplied by 50 (0x32)
285 * to get a maximum coalescing interval of 250 us,
286 * which is the default value
288 hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);
290 hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
292 hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
294 hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);
296 hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
298 hw_atl_rpo_lro_pkt_lim_set(self, 1U);
300 hw_atl_rpo_lro_en_set(self,
301 aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
302 hw_atl_itr_rsc_en_set(self,
303 aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
305 hw_atl_itr_rsc_delay_set(self, 1U);
307 return aq_hw_err_from_flags(self);
310 static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
312 /* Tx TC/Queue number config */
313 hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
315 hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
316 hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
317 hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
320 hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
323 aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
324 0x00010000U : 0x00000000U);
325 hw_atl_tdm_tx_dca_en_set(self, 0U);
326 hw_atl_tdm_tx_dca_mode_set(self, 0U);
328 hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
330 return aq_hw_err_from_flags(self);
333 static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
335 struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
338 /* Rx TC/RSS number config */
339 hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
341 /* Rx flow control */
342 hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
344 /* RSS Ring selection */
345 hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
346 0xB3333333U : 0x00000000U);
348 /* Multicast filters */
349 for (i = HW_ATL_B0_MAC_MAX; i--;) {
350 hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
351 hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
354 hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
355 hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
358 hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
359 hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
361 hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
363 /* Always accept untagged packets */
364 hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
365 hw_atl_rpf_vlan_untagged_act_set(self, 1U);
368 hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
371 aq_hw_write_reg(self, 0x00005040U,
372 IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
374 hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
375 hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
377 hw_atl_rdm_rx_dca_en_set(self, 0U);
378 hw_atl_rdm_rx_dca_mode_set(self, 0U);
380 return aq_hw_err_from_flags(self);
383 static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
393 h = (mac_addr[0] << 8) | (mac_addr[1]);
394 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
395 (mac_addr[4] << 8) | mac_addr[5];
397 hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
398 hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
399 hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
400 hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
402 err = aq_hw_err_from_flags(self);
408 static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
410 static u32 aq_hw_atl_igcr_table_[4][2] = {
411 [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
412 [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
413 [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
414 [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
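/* Column 0 is used when a single interrupt vector is configured, column 1
 * when more than one vector is in use (selected by the
 * "aq_nic_cfg->vecs > 1U" index further down).
 */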
420 struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
422 hw_atl_b0_hw_init_tx_path(self);
423 hw_atl_b0_hw_init_rx_path(self);
425 hw_atl_b0_hw_mac_addr_set(self, mac_addr);
427 self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
428 self->aq_fw_ops->set_state(self, MPI_INIT);
430 hw_atl_b0_hw_qos_set(self);
431 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
432 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
434 /* Force limit MRRS on RDM/TDM to 2K */
435 val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
436 aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
437 (val & ~0x707) | 0x404);
439 /* TX DMA total request limit. B0 hardware cannot handle
440 * more than (8K - MRRS) bytes of incoming DMA data.
441 * The value 24 is in 256-byte units
443 aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
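/* Worked out: 24 * 256 B = 6 KiB, i.e. the 8 KiB limit minus the 2 KiB
 * MRRS forced above.
 */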
445 /* Reset link status and read out initial hardware counters */
446 self->aq_link_status.mbps = 0;
447 self->aq_fw_ops->update_stats(self);
449 err = aq_hw_err_from_flags(self);
454 hw_atl_reg_irq_glb_ctl_set(self,
455 aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
456 [(aq_nic_cfg->vecs > 1U) ?
459 hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
462 hw_atl_reg_gen_irq_map_set(self,
463 ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
464 ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
466 /* Enable link interrupt */
467 if (aq_nic_cfg->link_irq_vec)
468 hw_atl_reg_gen_irq_map_set(self, BIT(7) |
469 aq_nic_cfg->link_irq_vec, 3U);
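/* Assumption: in this mapping register BIT(7) acts as the enable flag for
 * the slot while the low bits carry the vector number, with slot 3 used
 * for the link event.
 */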
471 hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
477 static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
478 struct aq_ring_s *ring)
480 hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
481 return aq_hw_err_from_flags(self);
484 static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
485 struct aq_ring_s *ring)
487 hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
488 return aq_hw_err_from_flags(self);
491 static int hw_atl_b0_hw_start(struct aq_hw_s *self)
493 hw_atl_tpb_tx_buff_en_set(self, 1);
494 hw_atl_rpb_rx_buff_en_set(self, 1);
495 return aq_hw_err_from_flags(self);
498 static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
499 struct aq_ring_s *ring)
501 hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
505 static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
506 struct aq_ring_s *ring,
509 struct aq_ring_buff_s *buff = NULL;
510 struct hw_atl_txd_s *txd = NULL;
511 unsigned int buff_pa_len = 0U;
512 unsigned int pkt_len = 0U;
513 unsigned int frag_count = 0U;
514 bool is_vlan = false;
517 buff = &ring->buff_ring[ring->sw_tail];
518 pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;
520 for (frag_count = 0; frag_count < frags; frag_count++) {
521 txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
527 buff = &ring->buff_ring[ring->sw_tail];
530 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
531 txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
532 txd->ctl |= (buff->len_l3 << 31) |
533 (buff->len_l2 << 24);
534 txd->ctl2 |= (buff->mss << 16);
537 pkt_len -= (buff->len_l4 +
541 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
542 txd->ctl2 |= (buff->len_l4 << 8) |
546 txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
547 txd->ctl |= buff->vlan_tx_tag << 4;
550 if (!buff->is_gso && !buff->is_vlan) {
551 buff_pa_len = buff->len;
553 txd->buf_addr = buff->pa;
554 txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
555 ((u32)buff_pa_len << 4));
556 txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
559 txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);
561 if (is_gso || is_vlan) {
562 /* enable tx context */
563 txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
566 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
568 /* Tx checksum offloads */
570 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;
572 if (buff->is_udp_cso || buff->is_tcp_cso)
573 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;
576 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_VLAN;
578 if (unlikely(buff->is_eop)) {
579 txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
580 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
585 ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
588 hw_atl_b0_hw_tx_ring_tail_update(self, ring);
589 return aq_hw_err_from_flags(self);
592 static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
593 struct aq_ring_s *aq_ring,
594 struct aq_ring_param_s *aq_ring_param)
596 u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
597 u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
598 u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
600 hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
602 hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
604 hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
607 hw_atl_reg_rx_dma_desc_base_addressmswset(self,
608 dma_desc_addr_msw, aq_ring->idx);
610 hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
612 hw_atl_rdm_rx_desc_data_buff_size_set(self,
613 AQ_CFG_RX_FRAME_MAX / 1024U,
616 hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
617 hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
618 hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping,
621 /* Rx ring set mode */
623 /* Mapping interrupt vector */
624 hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
625 hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
627 hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
628 hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
629 hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
630 hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
632 return aq_hw_err_from_flags(self);
635 static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
636 struct aq_ring_s *aq_ring,
637 struct aq_ring_param_s *aq_ring_param)
639 u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
640 u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
642 hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
645 hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
648 hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
650 hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
652 /* Set Tx threshold */
653 hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
655 /* Mapping interrupt vector */
656 hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
657 hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
659 hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
660 hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
662 return aq_hw_err_from_flags(self);
665 static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
666 struct aq_ring_s *ring,
667 unsigned int sw_tail_old)
669 for (; sw_tail_old != ring->sw_tail;
670 sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
671 struct hw_atl_rxd_s *rxd =
672 (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
675 struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
677 rxd->buf_addr = buff->pa;
681 hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
683 return aq_hw_err_from_flags(self);
686 static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
687 struct aq_ring_s *ring)
691 for (i = aq_ring_avail_dx(ring); i--;
692 ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
693 struct hw_atl_rxd_s *rxd =
694 (struct hw_atl_rxd_s *)
695 &ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];
697 rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
700 /* Make sure descriptors are updated before bumping the tail */
703 hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
705 return aq_hw_err_from_flags(self);
708 static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
709 struct aq_ring_s *ring)
711 while (ring->hw_head != ring->sw_tail) {
712 struct hw_atl_rxd_hwts_wb_s *hwts_wb =
713 (struct hw_atl_rxd_hwts_wb_s *)
714 (ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));
716 /* RxD is not done */
717 if (!(hwts_wb->sec_lw0 & 0x1U))
720 ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
723 return aq_hw_err_from_flags(self);
726 static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
727 struct aq_ring_s *ring)
730 unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
732 if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
736 ring->hw_head = hw_head_;
737 err = aq_hw_err_from_flags(self);
743 static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
744 struct aq_ring_s *ring)
746 for (; ring->hw_head != ring->sw_tail;
747 ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
748 struct aq_ring_buff_s *buff = NULL;
749 struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
750 &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
752 unsigned int is_rx_check_sum_enabled = 0U;
753 unsigned int pkt_type = 0U;
756 if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
760 buff = &ring->buff_ring[ring->hw_head];
763 buff->is_hash_l4 = 0U;
765 rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
767 is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;
769 pkt_type = (rxd_wb->type & HW_ATL_B0_RXD_WB_STAT_PKTTYPE) >>
770 HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT;
772 if (is_rx_check_sum_enabled & BIT(0) &&
773 (0x0U == (pkt_type & 0x3U)))
774 buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
776 if (is_rx_check_sum_enabled & BIT(1)) {
777 if (0x4U == (pkt_type & 0x1CU))
778 buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
779 !!(rx_stat & BIT(3));
780 else if (0x0U == (pkt_type & 0x1CU))
781 buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
782 !!(rx_stat & BIT(3));
784 buff->is_cso_err = !!(rx_stat & 0x6);
785 /* Checksum offload workaround for small packets */
786 if (unlikely(rxd_wb->pkt_len <= 60)) {
787 buff->is_ip_cso = 0U;
788 buff->is_cso_err = 0U;
791 if (self->aq_nic_cfg->is_vlan_rx_strip &&
792 ((pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN) ||
793 (pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE))) {
795 buff->vlan_rx_tag = le16_to_cpu(rxd_wb->vlan);
798 if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
799 /* MAC error or DMA error */
802 if (self->aq_nic_cfg->is_rss) {
804 u16 rss_type = rxd_wb->type & 0xFU;
806 if (rss_type && rss_type < 0x8U) {
807 buff->is_hash_l4 = (rss_type == 0x4 ||
809 buff->rss_hash = rxd_wb->rss_hash;
813 if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
814 buff->len = rxd_wb->pkt_len %
816 buff->len = buff->len ?
817 buff->len : AQ_CFG_RX_FRAME_MAX;
822 rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
823 AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
825 if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
828 buff->next = rxd_wb->next_desc_ptr;
829 ++ring->stats.rx.lro_packets;
833 aq_ring_next_dx(ring,
835 ++ring->stats.rx.jumbo_packets;
840 return aq_hw_err_from_flags(self);
843 static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
845 hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
846 return aq_hw_err_from_flags(self);
849 static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
851 hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
852 hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
854 atomic_inc(&self->dpc);
855 return aq_hw_err_from_flags(self);
858 static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
860 *mask = hw_atl_itr_irq_statuslsw_get(self);
861 return aq_hw_err_from_flags(self);
864 #define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
866 static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
867 unsigned int packet_filter)
870 struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
872 hw_atl_rpfl2promiscuous_mode_en_set(self,
873 IS_FILTER_ENABLED(IFF_PROMISC));
875 hw_atl_rpf_vlan_prom_mode_en_set(self,
876 IS_FILTER_ENABLED(IFF_PROMISC) ||
877 cfg->is_vlan_force_promisc);
879 hw_atl_rpfl2multicast_flr_en_set(self,
880 IS_FILTER_ENABLED(IFF_ALLMULTI) &&
881 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
883 hw_atl_rpfl2_accept_all_mc_packets_set(self,
884 IS_FILTER_ENABLED(IFF_ALLMULTI) &&
885 IS_FILTER_ENABLED(IFF_MULTICAST));
887 hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
890 for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
891 hw_atl_rpfl2_uc_flr_en_set(self,
892 (cfg->is_mc_list_enabled &&
893 (i <= cfg->mc_list_count)) ?
896 return aq_hw_err_from_flags(self);
899 #undef IS_FILTER_ENABLED
901 static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
903 [AQ_HW_MULTICAST_ADDRESS_MAX]
909 if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
913 for (self->aq_nic_cfg->mc_list_count = 0U;
914 self->aq_nic_cfg->mc_list_count < count;
915 ++self->aq_nic_cfg->mc_list_count) {
916 u32 i = self->aq_nic_cfg->mc_list_count;
917 u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
918 u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
919 (ar_mac[i][4] << 8) | ar_mac[i][5];
921 hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
923 hw_atl_rpfl2unicast_dest_addresslsw_set(self,
924 l, HW_ATL_B0_MAC_MIN + i);
926 hw_atl_rpfl2unicast_dest_addressmsw_set(self,
927 h, HW_ATL_B0_MAC_MIN + i);
929 hw_atl_rpfl2_uc_flr_en_set(self,
930 (self->aq_nic_cfg->is_mc_list_enabled),
931 HW_ATL_B0_MAC_MIN + i);
934 err = aq_hw_err_from_flags(self);
940 static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
946 switch (self->aq_nic_cfg->itr) {
947 case AQ_CFG_INTERRUPT_MODERATION_ON:
948 case AQ_CFG_INTERRUPT_MODERATION_AUTO:
949 hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
950 hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
951 hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
952 hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
954 if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
955 /* HW timers are in 2us units */
956 int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
957 int tx_min_timer = tx_max_timer / 2;
959 int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
960 int rx_min_timer = rx_max_timer / 2;
962 tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
963 tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
964 rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
965 rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
967 itr_tx |= tx_min_timer << 0x8U;
968 itr_tx |= tx_max_timer << 0x10U;
969 itr_rx |= rx_min_timer << 0x8U;
970 itr_rx |= rx_max_timer << 0x10U;
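/* The resulting moderation control value carries the minimum timer
 * starting at bit 8 and the maximum timer starting at bit 16, both in the
 * 2 us units noted above.
 */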
972 static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
973 {0xfU, 0xffU}, /* 10Gbit */
974 {0xfU, 0x1ffU}, /* 5Gbit */
975 {0xfU, 0x1ffU}, /* 5Gbit 5GS */
976 {0xfU, 0x1ffU}, /* 2.5Gbit */
977 {0xfU, 0x1ffU}, /* 1Gbit */
978 {0xfU, 0x1ffU}, /* 100Mbit */
981 static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
982 {0x6U, 0x38U},/* 10Gbit */
983 {0xCU, 0x70U},/* 5Gbit */
984 {0xCU, 0x70U},/* 5Gbit 5GS */
985 {0x18U, 0xE0U},/* 2.5Gbit */
986 {0x30U, 0x80U},/* 1Gbit */
987 {0x4U, 0x50U},/* 100Mbit */
990 unsigned int speed_index =
991 hw_atl_utils_mbps_2_speed_index(
992 self->aq_link_status.mbps);
994 /* Update user visible ITR settings */
995 self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
996 [speed_index][1] * 2;
997 self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
998 [speed_index][1] * 2;
1000 itr_tx |= hw_atl_b0_timers_table_tx_
1001 [speed_index][0] << 0x8U;
1002 itr_tx |= hw_atl_b0_timers_table_tx_
1003 [speed_index][1] << 0x10U;
1005 itr_rx |= hw_atl_b0_timers_table_rx_
1006 [speed_index][0] << 0x8U;
1007 itr_rx |= hw_atl_b0_timers_table_rx_
1008 [speed_index][1] << 0x10U;
1011 case AQ_CFG_INTERRUPT_MODERATION_OFF:
1012 hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
1013 hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
1014 hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
1015 hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
1021 for (i = HW_ATL_B0_RINGS_MAX; i--;) {
1022 hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
1023 hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
1026 return aq_hw_err_from_flags(self);
1029 static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
1034 hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
1036 /* Invalidate Descriptor Cache to prevent writing to the cached
1037 * descriptors and to the data pointer of those descriptors
1039 hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
1041 err = aq_hw_err_from_flags(self);
1046 readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
1047 self, val, val == 1, 1000U, 10000U);
1053 static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
1054 struct aq_ring_s *ring)
1056 hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
1057 return aq_hw_err_from_flags(self);
1060 static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
1061 struct aq_ring_s *ring)
1063 hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
1064 return aq_hw_err_from_flags(self);
1067 static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
1069 *tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
1070 return aq_hw_err_from_flags(self);
1073 static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
1075 *tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self);
1076 return aq_hw_err_from_flags(self);
1079 #define get_ptp_ts_val_u64(self, indx) \
1080 ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))
1082 static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp)
1086 hw_atl_pcs_ptp_clock_read_enable(self, 1);
1087 hw_atl_pcs_ptp_clock_read_enable(self, 0);
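/* The clock value is read back in 16-bit chunks: chunks 0 and 1 form the
 * seconds count, chunks 3 and 4 the nanoseconds count.
 */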
1088 ns = (get_ptp_ts_val_u64(self, 0) +
1089 (get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC +
1090 (get_ptp_ts_val_u64(self, 3) +
1091 (get_ptp_ts_val_u64(self, 4) << 16));
1093 *stamp = ns + self->ptp_clk_offset;
1096 static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
1098 /* Scale up by NSEC_PER_SEC to keep precision through the divisions below */
1099 s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC);
1103 base_ns = div64_s64(base_ns, freq);
1104 nsi = div64_u64(base_ns, NSEC_PER_SEC);
1106 if (base_ns != nsi * NSEC_PER_SEC) {
1107 s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
1108 base_ns - nsi * NSEC_PER_SEC);
1109 nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor);
1113 *fns = (u32)nsi_frac;
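/* For example, with adj == 0 this yields the nominal counter period 1/freq
 * split into whole nanoseconds (*ns) and a 1/2^32-ns fraction (*fns);
 * a non-zero adj (ppb from the caller) scales that period by
 * (1 + adj / 10^9).
 */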
1117 hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
1118 u64 phyfreq, u64 macfreq)
1121 s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
1122 FRAC_PER_NS * ptp_adj_freq->ns_phy);
1123 s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
1124 FRAC_PER_NS * ptp_adj_freq->ns_mac);
1125 s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
1126 s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
1127 /* MAC MCP counter freq is macfreq / 4 */
1128 s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
1131 diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
1132 AQ_HW_MAC_COUNTER_HZ);
1133 adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS *
1134 ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;
1136 ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, FRAC_PER_NS);
1137 ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
1141 static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
1143 self->ptp_clk_offset += delta;
1148 static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
1150 s64 delta = time - (self->ptp_clk_offset + ts);
1152 return hw_atl_b0_adj_sys_clock(self, delta);
1155 static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
1157 *time = self->ptp_clk_offset + ts;
1161 static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
1163 struct hw_fw_request_iface fwreq;
1166 memset(&fwreq, 0, sizeof(fwreq));
1168 fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ;
1169 hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb,
1170 &fwreq.ptp_adj_freq.ns_mac,
1171 &fwreq.ptp_adj_freq.fns_mac);
1172 hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb,
1173 &fwreq.ptp_adj_freq.ns_phy,
1174 &fwreq.ptp_adj_freq.fns_phy);
1175 hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq,
1176 AQ_HW_PHY_COUNTER_HZ,
1177 AQ_HW_MAC_COUNTER_HZ);
1179 size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq);
1180 return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
1183 static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
1184 u64 start, u32 period)
1186 struct hw_fw_request_iface fwreq;
1189 memset(&fwreq, 0, sizeof(fwreq));
1191 fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL;
1192 fwreq.ptp_gpio_ctrl.index = index;
1193 fwreq.ptp_gpio_ctrl.period = period;
1194 /* Apply time offset */
1195 fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset;
1197 size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
1198 return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
1201 static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index,
1204 /* Enable/disable Sync1588 GPIO Timestamping */
1205 aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0);
1210 static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts)
1220 /* PTP external GPIO clock seconds count 15:0 */
1221 sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914);
1222 /* PTP external GPIO clock seconds count 31:16 */
1223 sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915);
1224 /* PTP external GPIO clock nanoseconds count 15:0 */
1225 nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916);
1226 /* PTP external GPIO clock nanoseconds count 31:16 */
1227 nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917);
1229 *ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC;
1234 static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
1235 unsigned int len, u64 *timestamp)
1237 unsigned int offset = 14;
1243 if (len <= offset || !timestamp)
1246 /* The TIMESTAMP at the end of the packet has the following format:
1251 * uint16_t stream_id;
1254 ptr = p + (len - offset);
1255 memcpy(&sec, ptr, sizeof(sec));
1257 memcpy(&ns, ptr, sizeof(ns));
1259 *timestamp = (be64_to_cpu(sec) & 0xffffffffffffllu) * NSEC_PER_SEC +
1260 be32_to_cpu(ns) + self->ptp_clk_offset;
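/* The trailing timestamp therefore occupies the last 14 bytes of the
 * packet: 8 (sec) + 4 (ns) + 2 (stream_id), matching "offset" above.
 */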
1262 eth = (struct ethhdr *)p;
1264 return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
1267 static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
1270 struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
1274 tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
1276 tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
1278 tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
1280 tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
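/* The seconds value is reassembled here from four split fields
 * (10 + 16 + 12 + 10 bits) into a single 48-bit quantity.
 */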
1282 ns = sec * NSEC_PER_SEC + hwts_wb->ns;
1284 *timestamp = ns + self->ptp_clk_offset;
1288 static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
1289 struct aq_rx_filter_l3l4 *data)
1291 u8 location = data->location;
1293 if (!data->is_ipv6) {
1294 hw_atl_rpfl3l4_cmd_clear(self, location);
1295 hw_atl_rpf_l4_spd_set(self, 0U, location);
1296 hw_atl_rpf_l4_dpd_set(self, 0U, location);
1297 hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
1298 hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
1302 for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
1303 hw_atl_rpfl3l4_cmd_clear(self, location + i);
1304 hw_atl_rpf_l4_spd_set(self, 0U, location + i);
1305 hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
1307 hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
1308 hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
1311 return aq_hw_err_from_flags(self);
1314 static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
1315 struct aq_rx_filter_l3l4 *data)
1317 u8 location = data->location;
1319 hw_atl_b0_hw_fl3l4_clear(self, data);
1321 if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 |
1322 HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) {
1323 if (!data->is_ipv6) {
1324 hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
1327 hw_atl_rpfl3l4_ipv4_src_addr_set(self,
1331 hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
1334 hw_atl_rpfl3l4_ipv6_src_addr_set(self,
1340 if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
1341 HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) {
1342 hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
1343 hw_atl_rpf_l4_spd_set(self, data->p_src, location);
1346 hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
1348 return aq_hw_err_from_flags(self);
1351 static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
1352 struct aq_rx_filter_l2 *data)
1354 hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
1355 hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
1356 hw_atl_rpf_etht_user_priority_en_set(self,
1357 !!data->user_priority_en,
1359 if (data->user_priority_en)
1360 hw_atl_rpf_etht_user_priority_set(self,
1361 data->user_priority,
1364 if (data->queue < 0) {
1365 hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
1366 hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
1368 hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
1369 hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
1370 hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
1373 return aq_hw_err_from_flags(self);
1376 static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
1377 struct aq_rx_filter_l2 *data)
1379 hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
1380 hw_atl_rpf_etht_flr_set(self, 0U, data->location);
1381 hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
1383 return aq_hw_err_from_flags(self);
1387 * @brief Set VLAN filter table
1388 * @details Configure VLAN filter table to accept (and assign the queue) traffic
1389 * for the particular VLAN IDs.
1390 * Note: use this function under VLAN promiscuous mode so as not to lose traffic
1393 * @param aq_rx_filter_vlan VLAN filter configuration
1394 * @return 0 - OK, <0 - error
1396 static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
1397 struct aq_rx_filter_vlan *aq_vlans)
1401 for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
1402 hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
1403 hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
1404 if (aq_vlans[i].enable) {
1405 hw_atl_rpf_vlan_id_flr_set(self,
1406 aq_vlans[i].vlan_id,
1408 hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
1409 hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
1410 if (aq_vlans[i].queue != 0xFF) {
1411 hw_atl_rpf_vlan_rxq_flr_set(self,
1414 hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
1419 return aq_hw_err_from_flags(self);
1422 static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
1424 /* Set promiscuous mode when disabling the VLAN filter */
1425 hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
1427 return aq_hw_err_from_flags(self);
1430 static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
1433 case AQ_HW_LOOPBACK_DMA_SYS:
1434 hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable);
1435 hw_atl_rpb_dma_sys_lbk_set(self, enable);
1437 case AQ_HW_LOOPBACK_PKT_SYS:
1438 hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable);
1439 hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable);
1441 case AQ_HW_LOOPBACK_DMA_NET:
1442 hw_atl_rpf_vlan_prom_mode_en_set(self, enable);
1443 hw_atl_rpfl2promiscuous_mode_en_set(self, enable);
1444 hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable);
1445 hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable);
1446 hw_atl_rpb_dma_net_lbk_set(self, enable);
1454 const struct aq_hw_ops hw_atl_ops_b0 = {
1455 .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
1456 .hw_init = hw_atl_b0_hw_init,
1457 .hw_reset = hw_atl_b0_hw_reset,
1458 .hw_start = hw_atl_b0_hw_start,
1459 .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
1460 .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
1461 .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
1462 .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
1463 .hw_stop = hw_atl_b0_hw_stop,
1465 .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
1466 .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,
1468 .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
1469 .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,
1471 .hw_irq_enable = hw_atl_b0_hw_irq_enable,
1472 .hw_irq_disable = hw_atl_b0_hw_irq_disable,
1473 .hw_irq_read = hw_atl_b0_hw_irq_read,
1475 .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
1476 .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
1477 .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
1478 .hw_filter_l2_set = hw_atl_b0_hw_fl2_set,
1479 .hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear,
1480 .hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set,
1481 .hw_filter_vlan_set = hw_atl_b0_hw_vlan_set,
1482 .hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl,
1483 .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
1484 .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
1485 .hw_rss_set = hw_atl_b0_hw_rss_set,
1486 .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
1487 .hw_get_regs = hw_atl_utils_hw_get_regs,
1488 .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
1489 .hw_get_fw_version = hw_atl_utils_get_fw_version,
1491 .hw_tx_tc_mode_get = hw_atl_b0_tx_tc_mode_get,
1492 .hw_rx_tc_mode_get = hw_atl_b0_rx_tc_mode_get,
1494 .hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
1495 .hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,
1497 .hw_get_ptp_ts = hw_atl_b0_get_ptp_ts,
1498 .hw_adj_sys_clock = hw_atl_b0_adj_sys_clock,
1499 .hw_set_sys_clock = hw_atl_b0_set_sys_clock,
1500 .hw_ts_to_sys_clock = hw_atl_b0_ts_to_sys_clock,
1501 .hw_adj_clock_freq = hw_atl_b0_adj_clock_freq,
1502 .hw_gpio_pulse = hw_atl_b0_gpio_pulse,
1503 .hw_extts_gpio_enable = hw_atl_b0_extts_gpio_enable,
1504 .hw_get_sync_ts = hw_atl_b0_get_sync_ts,
1505 .rx_extract_ts = hw_atl_b0_rx_extract_ts,
1506 .extract_hwts = hw_atl_b0_extract_hwts,
1507 .hw_set_offload = hw_atl_b0_hw_offload_set,
1511 .hw_set_loopback = hw_atl_b0_set_loopback,
1512 .hw_set_fc = hw_atl_b0_set_fc,