// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "../aq_phy.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
	.is_64_dma = true,		  \
	.msix_irqs = 8U,		  \
	.irq_mask = ~0U,		  \
	.vecs = HW_ATL_B0_RSS_MAX,	  \
	.tcs_max = HW_ATL_B0_TC_MAX,	  \
	.rxd_alignment = 1U,		  \
	.rxd_size = HW_ATL_B0_RXD_SIZE,   \
	.rxds_max = HW_ATL_B0_MAX_RXD,	  \
	.rxds_min = HW_ATL_B0_MIN_RXD,	  \
	.txd_alignment = 1U,		  \
	.txd_size = HW_ATL_B0_TXD_SIZE,   \
	.txds_max = HW_ATL_B0_MAX_TXD,	  \
	.txds_min = HW_ATL_B0_MIN_TXD,	  \
	.txhwb_alignment = 4096U,	  \
	.tx_rings = HW_ATL_B0_TX_RINGS,   \
	.rx_rings = HW_ATL_B0_RX_RINGS,   \
	.hw_features = NETIF_F_HW_CSUM |	\
			NETIF_F_HW_VLAN_CTAG_FILTER |	\
			NETIF_F_HW_VLAN_CTAG_RX |	\
			NETIF_F_HW_VLAN_CTAG_TX |	\
			NETIF_F_GSO_UDP_L4 |		\
			NETIF_F_GSO_PARTIAL |		\
			NETIF_F_HW_TC,			\
	.hw_priv_flags = IFF_UNICAST_FLT, \
	.flow_control = true,		  \
	.mtu = HW_ATL_B0_MTU_JUMBO,	  \
	.mac_regs_count = 88,		  \
	.hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
	.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
	.quirks = AQ_NIC_QUIRK_BAD_PTP,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
	.quirks = AQ_NIC_QUIRK_BAD_PTP,
};
static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
	int err;

	err = hw_atl_utils_soft_reset(self);
	if (err)
		return err;

	self->aq_fw_ops->set_state(self, MPI_RESET);

	err = aq_hw_err_from_flags(self);

	return err;
}
int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_tc_ptp_set(struct aq_hw_s *self)
{
	/* Init TC2 for PTP_TX */
	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
					       AQ_HW_PTP_TC);

	/* Init TC2 for PTP_RX */
	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
					       AQ_HW_PTP_TC);
	/* No flow control for PTP */
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, AQ_HW_PTP_TC);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	u32 tx_buff_size = HW_ATL_B0_TXBUF_MAX;
	u32 rx_buff_size = HW_ATL_B0_RXBUF_MAX;
	unsigned int prio = 0U;
	u32 tc = 0U;

	if (cfg->is_ptp) {
		tx_buff_size -= HW_ATL_B0_PTP_TXBUF_SIZE;
		rx_buff_size -= HW_ATL_B0_PTP_RXBUF_SIZE;
	}

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	tx_buff_size /= cfg->tcs;
	rx_buff_size /= cfg->tcs;
	for (tc = 0; tc < cfg->tcs; tc++) {
		u32 threshold = 0U;

		/* Tx buf size per TC */
		hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);

		threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
		hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);

		threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
		hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
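		/* Worked example for the watermarks above (illustrative
		 * assumption: HW_ATL_B0_TXBUF_MAX is 160 KiB and a single
		 * TC is configured): tx_buff_size is 160, and 1024 / 32
		 * converts KiB into the hardware's 32-byte units, so the
		 * high threshold is 160 * 32 * 66 / 100 = 3379 units
		 * (about 66% of the buffer) and the low threshold is
		 * 160 * 32 * 50 / 100 = 2560 units (50%).
		 */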
		/* QoS Rx buf size per TC */
		hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);

		threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
		hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);

		threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
		hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);

		hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
	}

	if (cfg->is_ptp)
		hw_atl_b0_tc_ptp_set(self);

	/* QoS 802.1p priority -> TC mapping */
	for (prio = 0; prio < 8; ++prio)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
							cfg->prio_tc_map[prio]);

	return aq_hw_err_from_flags(self);
}
int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
			      struct aq_rss_parameters *rss_params)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	unsigned int addr = 0U;
	unsigned int i = 0U;
	int err = 0;
	u32 val;

	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = cfg->is_rss ?
			__swab32(rss_params->hash_secret_key[i]) : 0U;
		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
		hw_atl_rpf_rss_key_addr_set(self, addr);
		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
		err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
						self, val, val == 0,
						1000U, 10000U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
				struct aq_rss_parameters *rss_params)
{
	u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
	u8 *indirection_table = rss_params->indirection_table;
	u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
			HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
	int err = 0;
	u32 i = 0U;
	u32 val;

	memset(bitary, 0, sizeof(bitary));

	for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
		(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
			((indirection_table[i] % num_rss_queues) <<
			 ((i * 3U) & 0xFU));
	}
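	/* Each indirection entry is a 3-bit queue number packed back to
	 * back into the u16 array: entry i starts at bit (i * 3) % 16 of
	 * the u16 at index (i * 3) / 16. The store goes through a u32
	 * pointer because an entry can straddle a u16 boundary, e.g.
	 * entry 10 starts at bit 14 of word 1 and spills into word 2.
	 */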
	for (i = ARRAY_SIZE(bitary); i--;) {
		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
		hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
		err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
						self, val, val == 0,
						1000U, 10000U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
			     struct aq_nic_cfg_s *aq_nic_cfg)
{
	u64 rxcsum = !!(aq_nic_cfg->features & NETIF_F_RXCSUM);
	unsigned int i;

	/* TX checksum offloads */
	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksum offloads */
	hw_atl_rpo_ipv4header_crc_offload_en_set(self, rxcsum);
	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, rxcsum);

	/* LSO offloads */
	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

	/* Outer VLAN tag offload */
	hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);

	/* LRO offloads */
	{
		unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
			((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
			((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

		for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
			hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
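		/* The field appears to encode the per-session descriptor
		 * limit as a power of two: 0 -> 2, 1 -> 4, 2 -> 8,
		 * 3 -> 16 descriptors (inferred from the ladder above).
		 * Assuming HW_ATL_B0_LRO_RXD_MAX is 16, val resolves
		 * to 0x3.
		 */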
		hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
		hw_atl_rpo_lro_inactive_interval_set(self, 0);
		/* The LRO timebase divider of 0x61A yields a 5 us tick;
		 * multiplying it by 50 (0x32) gives the default maximum
		 * coalescing interval of 250 us.
		 */
		hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);

		hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

		hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

		hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);

		hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

		hw_atl_rpo_lro_pkt_lim_set(self, 1U);

		hw_atl_rpo_lro_en_set(self,
				      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
		hw_atl_itr_rsc_en_set(self,
				      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);

		hw_atl_itr_rsc_delay_set(self, 1U);
	}

	return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
{
	static const u32 max_weight = BIT(HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
	/* Scale factor is based on the number of bits in fractional portion */
	static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
	static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
				    HW_ATL_TPS_DESC_RATE_Y_SHIFT;
	const u32 link_speed = self->aq_link_status.mbps;
	struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
	unsigned long num_min_rated_tcs = 0;
	u32 tc_weight[AQ_CFG_TCS_MAX];
	u32 fixed_max_credit;
	u8 min_rate_msk = 0;
	u32 sum_weight = 0;
	int tc;

	/* By default max_credit is based upon MTU (in unit of 64b) */
	fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;

	if (link_speed) {
		min_rate_msk = nic_cfg->tc_min_rate_msk &
			       (BIT(nic_cfg->tcs) - 1);
		num_min_rated_tcs = hweight8(min_rate_msk);
	}

	/* First, calculate weights where min_rate is specified */
	if (num_min_rated_tcs) {
		for (tc = 0; tc != nic_cfg->tcs; tc++) {
			if (!nic_cfg->tc_min_rate[tc]) {
				tc_weight[tc] = 0;
				continue;
			}

			tc_weight[tc] = (-1L + link_speed +
					 nic_cfg->tc_min_rate[tc] *
					 max_weight) /
					link_speed;
			tc_weight[tc] = min(tc_weight[tc], max_weight);
			sum_weight += tc_weight[tc];
		}
	}

	/* WSP, if min_rate is set for at least one TC.
	 * RR otherwise.
	 *
	 * NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't
	 * overwrite it here in that case.
	 */
	if (!nic_cfg->is_ptp)
		hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);

	/* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
	 * leave Descriptor TC Arbiter as RR.
	 */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);

	hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);

	for (tc = 0; tc != nic_cfg->tcs; tc++) {
		const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
		const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
		u32 weight, max_credit;

		hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
							      fixed_max_credit);
		hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);

		if (num_min_rated_tcs) {
			weight = tc_weight[tc];

			if (!weight && sum_weight < max_weight)
				weight = (max_weight - sum_weight) /
					 (nic_cfg->tcs - num_min_rated_tcs);
			else if (!weight)
				weight = 0x64;

			max_credit = max(8 * weight, fixed_max_credit);
		} else {
			weight = 0x64;
			max_credit = 0xFFF;
		}

		hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
		hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
							      max_credit);

		hw_atl_tps_tx_desc_rate_en_set(self, desc, en);

		if (en) {
			/* Nominal rate is always 10G */
			const u32 rate = 10000U * scale /
					 nic_cfg->tc_max_rate[tc];
			const u32 rate_int = rate >>
					     HW_ATL_TPS_DESC_RATE_Y_WIDTH;
			const u32 rate_frac = rate & frac_msk;

			hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
			hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
		} else {
			/* A value of 1 indicates the queue is not
			 * rate controlled.
			 */
			hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
			hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
		}
	}
	for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
		const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);

		hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
		hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
		hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
	}

	return aq_hw_err_from_flags(self);
}
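/* Worked example for the descriptor rate divider above: the programmed
 * rate is a fixed-point "nominal 10G / tc_max_rate" factor with
 * scale = 2^HW_ATL_TPS_DESC_RATE_Y_WIDTH. For tc_max_rate = 2500 Mbps
 * the factor is 10000 * scale / 2500 = 4 * scale, so rate_int = 4 and
 * rate_frac = 0 (a 4.0x divider); for 4000 Mbps it is 2.5 * scale,
 * i.e. rate_int = 2 with rate_frac = scale / 2.
 */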
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;

	/* Tx TC/Queue number config */
	hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);

	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
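	/* These masks select which TCP flags survive segmentation; the
	 * split into first/middle vs last segments matches the usual LSO
	 * rule that flags such as FIN and PSH may only appear on the
	 * final segment (an inference from standard TSO semantics - the
	 * exact bit layout of 0x0FF6/0x0F7F is defined by the hardware).
	 */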
	/* Tx interrupts */
	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	aq_hw_write_reg(self, 0x00007040U, ATL_HW_IS_CHIP_FEATURE(self, TPO2) ?
			0x00010000U : 0x00000000U);
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);

	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

	return aq_hw_err_from_flags(self);
}
void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	u32 rss_ctrl1 = HW_ATL_RSS_DISABLED;

	if (cfg->is_rss)
		rss_ctrl1 = (cfg->tc_mode == AQ_TC_MODE_8TCS) ?
			    HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS :
			    HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS;

	hw_atl_reg_rx_flr_rss_control1set(self, rss_ctrl1);
}
static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	/* RSS Ring selection */
	hw_atl_b0_hw_init_rx_rss_ctrl1(self);

	/* Multicast filters */
	for (i = HW_ATL_B0_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
	}

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

	/* Vlan filters */
	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

	// Always accept untagged packets
	hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
	hw_atl_rpf_vlan_untagged_act_set(self, 1U);

	/* Rx Interrupts */
	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	aq_hw_write_reg(self, 0x00005040U, ATL_HW_IS_CHIP_FEATURE(self, RPF2) ?
			0x000F0000U : 0x00000000U);

	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
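	/* The threshold expression evaluates to 0xFFFF: ~0U / 256U is
	 * 0x00FFFFFF, and masking with 0xFFFFU keeps the low 16 bits,
	 * i.e. the broadcast counter threshold is simply its maximum
	 * 16-bit value.
	 */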
	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}
int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err = 0;

	if (!mac_addr) {
		err = -EINVAL;
		goto err_exit;
	}

	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) | mac_addr[5];
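	/* Example: for the (illustrative) address 00:17:b6:01:02:03 the
	 * two filter words become h = 0x0017 (the first two octets) and
	 * l = 0xb6010203 (the remaining four).
	 */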
	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
	static u32 aq_hw_atl_igcr_table_[4][2] = {
		[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
		[AQ_HW_IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
		[AQ_HW_IRQ_MSI]     = { 0x20000021U, 0x20000025U },
		[AQ_HW_IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
	};
	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
	int err = 0;
	u32 val;

	hw_atl_b0_hw_init_tx_path(self);
	hw_atl_b0_hw_init_rx_path(self);

	hw_atl_b0_hw_mac_addr_set(self, mac_addr);

	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
	self->aq_fw_ops->set_state(self, MPI_INIT);

	hw_atl_b0_hw_qos_set(self);
	hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	/* Force limit MRRS on RDM/TDM to 2K */
	val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
	aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
			(val & ~0x707) | 0x404);

	/* TX DMA total request limit. B0 hardware is not capable of
	 * handling more than (8K-MRRS) of incoming DMA data.
	 * The value 24 is in 256-byte units.
	 */
	aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
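	/* The arithmetic behind the two writes above: ~0x707 clears two
	 * 3-bit request-size fields (bits 2:0 and 10:8) and 0x404 writes
	 * 4 into each, which in the usual PCIe MRRS encoding (128 << n)
	 * means 2048 bytes (an assumption about the field encoding).
	 * The request limit of 24 * 256 = 6144 bytes then keeps the
	 * outstanding DMA within 8192 - 2048 = 6144.
	 */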
	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	self->aq_fw_ops->update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
						 [(aq_nic_cfg->vecs > 1U) ?
						  1 : 0]);

	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	/* Interrupts */
	hw_atl_reg_gen_irq_map_set(self,
				   ((HW_ATL_B0_ERR_INT << 0x18) |
				    (1U << 0x1F)) |
				   ((HW_ATL_B0_ERR_INT << 0x10) |
				    (1U << 0x17)), 0U);

	/* Enable link interrupt */
	if (aq_nic_cfg->link_irq_vec)
		hw_atl_reg_gen_irq_map_set(self, BIT(7) |
					   aq_nic_cfg->link_irq_vec, 3U);

	hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}
int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);

	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);

	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
	hw_atl_tpb_tx_buff_en_set(self, 1);
	hw_atl_rpb_rx_buff_en_set(self, 1);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

	return 0;
}
int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
			      unsigned int frags)
{
	struct aq_ring_buff_s *buff = NULL;
	struct hw_atl_txd_s *txd = NULL;
	unsigned int buff_pa_len = 0U;
	unsigned int frag_count = 0U;
	unsigned int pkt_len = 0U;
	bool is_vlan = false;
	bool is_gso = false;

	buff = &ring->buff_ring[ring->sw_tail];
	pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

	for (frag_count = 0; frag_count < frags; frag_count++) {
		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
						HW_ATL_B0_TXD_SIZE];
		txd->ctl = 0;
		txd->ctl2 = 0;
		txd->buf_addr = 0;

		buff = &ring->buff_ring[ring->sw_tail];

		if (buff->is_gso_tcp || buff->is_gso_udp) {
			if (buff->is_gso_tcp)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
			txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl |= (buff->len_l3 << 31) |
				    (buff->len_l2 << 24);
			txd->ctl2 |= (buff->mss << 16);
			is_gso = true;

			pkt_len -= (buff->len_l4 +
				    buff->len_l3 +
				    buff->len_l2);
			if (buff->is_ipv6)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
			txd->ctl2 |= (buff->len_l4 << 8) |
				     (buff->len_l3 >> 1);
		}
		if (buff->is_vlan) {
			txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl |= buff->vlan_tx_tag << 4;
			is_vlan = true;
		}
		if (!buff->is_gso_tcp && !buff->is_gso_udp && !buff->is_vlan) {
			buff_pa_len = buff->len;

			txd->buf_addr = buff->pa;
			txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
				     ((u32)buff_pa_len << 4));
			txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;

			/* PAY_LEN */
			txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

			if (is_gso || is_vlan) {
				/* enable tx context */
				txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
			}
			if (is_gso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;

			/* Tx checksum offloads */
			if (buff->is_ip_cso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

			if (buff->is_udp_cso || buff->is_tcp_cso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

			if (is_vlan)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_VLAN;

			if (unlikely(buff->is_eop)) {
				txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
				is_gso = false;
				is_vlan = false;
			}
		}
		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
	}

	hw_atl_b0_hw_tx_ring_tail_update(self, ring);

	return aq_hw_err_from_flags(self);
}
int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
			      struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
	u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;

	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
						  aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
						  dma_desc_addr_msw, aq_ring->idx);

	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
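	/* The ring length register counts descriptors in units of 8, so
	 * e.g. a 1024-descriptor ring is programmed as 128; likewise the
	 * data buffer size below is given in whole KiB.
	 */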
	hw_atl_rdm_rx_desc_data_buff_size_set(self,
					      AQ_CFG_RX_FRAME_MAX / 1024U,
					      aq_ring->idx);

	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
	hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping,
					      aq_ring->idx);

	/* Rx ring set mode */

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}
int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
			      struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;

	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
						  aq_ring->idx);

	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
						  aq_ring->idx);

	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

	/* Set Tx threshold */
	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}
int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
			      unsigned int sw_tail_old)
{
	for (; sw_tail_old != ring->sw_tail;
	     sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
							HW_ATL_B0_RXD_SIZE];

		struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

		rxd->buf_addr = buff->pa;
		rxd->hdr_addr = 0U;
	}

	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
					  struct aq_ring_s *ring)
{
	unsigned int i;

	for (i = aq_ring_avail_dx(ring); i--;
	     ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)
			&ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];

		rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
		rxd->hdr_addr = 0U;
	}
	/* Make sure descriptors are updated before bumping the tail */
	wmb();

	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
					     struct aq_ring_s *ring)
{
	while (ring->hw_head != ring->sw_tail) {
		struct hw_atl_rxd_hwts_wb_s *hwts_wb =
			(struct hw_atl_rxd_hwts_wb_s *)
			(ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));

		/* RxD is not done */
		if (!(hwts_wb->sec_lw0 & 0x1U))
			break;

		ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
	}

	return aq_hw_err_from_flags(self);
}
int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	unsigned int hw_head_;
	int err = 0;

	hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);

	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
		err = -ENXIO;
		goto err_exit;
	}
	ring->hw_head = hw_head_;
	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
{
	for (; ring->hw_head != ring->sw_tail;
	     ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
		struct aq_ring_buff_s *buff = NULL;
		struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
			&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

		unsigned int is_rx_check_sum_enabled = 0U;
		unsigned int pkt_type = 0U;
		u8 rx_stat = 0U;

		if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
			break;
		}

		buff = &ring->buff_ring[ring->hw_head];

		buff->flags = 0U;
		buff->is_hash_l4 = 0U;

		rx_stat = (0x0000003CU & rxd_wb->status) >> 2;

		is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;

		pkt_type = (rxd_wb->type & HW_ATL_B0_RXD_WB_STAT_PKTTYPE) >>
			   HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT;

		if (is_rx_check_sum_enabled & BIT(0) &&
		    (0x0U == (pkt_type & 0x3U)))
			buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;

		if (is_rx_check_sum_enabled & BIT(1)) {
			if (0x4U == (pkt_type & 0x1CU))
				buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
						   !!(rx_stat & BIT(3));
			else if (0x0U == (pkt_type & 0x1CU))
				buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
						   !!(rx_stat & BIT(3));
		}
		buff->is_cso_err = !!(rx_stat & 0x6);
		/* Checksum offload workaround for small packets */
		if (unlikely(rxd_wb->pkt_len <= 60)) {
			buff->is_ip_cso = 0U;
			buff->is_cso_err = 0U;
		}

		if (self->aq_nic_cfg->is_vlan_rx_strip &&
		    ((pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN) ||
		     (pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE))) {
			buff->is_vlan = true;
			buff->vlan_rx_tag = le16_to_cpu(rxd_wb->vlan);
		}

		if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
			/* MAC error or DMA error */
			buff->is_error = 1U;
		}
		if (self->aq_nic_cfg->is_rss) {
			/* last 4 byte */
			u16 rss_type = rxd_wb->type & 0xFU;

			if (rss_type && rss_type < 0x8U) {
				buff->is_hash_l4 = (rss_type == 0x4 ||
						    rss_type == 0x5);
				buff->rss_hash = rxd_wb->rss_hash;
			}
		}

		buff->is_lro = !!(HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
				  rxd_wb->status);
		if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
			buff->len = rxd_wb->pkt_len %
				    AQ_CFG_RX_FRAME_MAX;
			buff->len = buff->len ?
				    buff->len : AQ_CFG_RX_FRAME_MAX;
			buff->next = 0U;
			buff->is_eop = 1U;
		} else {
			buff->len =
				rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
				AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;

			if (buff->is_lro) {
				/* LRO */
				buff->next = rxd_wb->next_desc_ptr;
				++ring->stats.rx.lro_packets;
			} else {
				/* jumbo */
				buff->next =
					aq_ring_next_dx(ring,
							ring->hw_head);
				++ring->stats.rx.jumbo_packets;
			}
		}
	}

	return aq_hw_err_from_flags(self);
}
int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));

	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

	atomic_inc(&self->dpc);

	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
	*mask = hw_atl_itr_irq_statuslsw_get(self);

	return aq_hw_err_from_flags(self);
}
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
				   unsigned int packet_filter)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	unsigned int i = 0U;
	u32 vlan_promisc;
	u32 l2_promisc;

	l2_promisc = IS_FILTER_ENABLED(IFF_PROMISC) ||
		     !!(cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET));
	vlan_promisc = l2_promisc || cfg->is_vlan_force_promisc;

	hw_atl_rpfl2promiscuous_mode_en_set(self, l2_promisc);

	hw_atl_rpf_vlan_prom_mode_en_set(self, vlan_promisc);

	hw_atl_rpfl2multicast_flr_en_set(self,
					 IS_FILTER_ENABLED(IFF_ALLMULTI) &&
					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);

	hw_atl_rpfl2_accept_all_mc_packets_set(self,
					       IS_FILTER_ENABLED(IFF_ALLMULTI) &&
					       IS_FILTER_ENABLED(IFF_MULTICAST));

	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

	for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
		hw_atl_rpfl2_uc_flr_en_set(self,
					   (cfg->is_mc_list_enabled &&
					    (i <= cfg->mc_list_count)) ?
					   1U : 0U, i);

	return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED
static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
					   u8 ar_mac
					   [AQ_HW_MULTICAST_ADDRESS_MAX]
					   [ETH_ALEN],
					   u32 count)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int err = 0;

	if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (cfg->mc_list_count = 0U;
	     cfg->mc_list_count < count;
	     ++cfg->mc_list_count) {
		u32 i = cfg->mc_list_count;
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
							HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
							HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2_uc_flr_en_set(self,
					   (cfg->is_mc_list_enabled),
					   HW_ATL_B0_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
	unsigned int i = 0U;
	u32 itr_tx = 2U;
	u32 itr_rx = 2U;

	switch (self->aq_nic_cfg->itr) {
	case AQ_CFG_INTERRUPT_MODERATION_ON:
	case AQ_CFG_INTERRUPT_MODERATION_AUTO:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
			/* HW timers are in 2us units */
			int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
			int tx_min_timer = tx_max_timer / 2;

			int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
			int rx_min_timer = rx_max_timer / 2;

			tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
			tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
			rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
			rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);

			itr_tx |= tx_min_timer << 0x8U;
			itr_tx |= tx_max_timer << 0x10U;
			itr_rx |= rx_min_timer << 0x8U;
			itr_rx |= rx_max_timer << 0x10U;
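			/* Judging by the shifts, the moderation control
			 * word keeps the minimum timer in bits 15:8 and
			 * the maximum timer in bits 23:16. Example
			 * (before clamping): rx_itr = 200 us gives
			 * max = 100 (2 us units) and min = 50, i.e.
			 * itr_rx = 0x00643202.
			 */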
		} else {
			static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
				{0xfU, 0xffU}, /* 10Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit 5GS */
				{0xfU, 0x1ffU}, /* 2.5Gbit */
				{0xfU, 0x1ffU}, /* 1Gbit */
				{0xfU, 0x1ffU}, /* 100Mbit */
			};

			static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
				{0x6U, 0x38U},  /* 10Gbit */
				{0xCU, 0x70U},  /* 5Gbit */
				{0xCU, 0x70U},  /* 5Gbit 5GS */
				{0x18U, 0xE0U}, /* 2.5Gbit */
				{0x30U, 0x80U}, /* 1Gbit */
				{0x4U, 0x50U},  /* 100Mbit */
			};

			unsigned int speed_index =
					hw_atl_utils_mbps_2_speed_index(
						self->aq_link_status.mbps);

			/* Update user visible ITR settings */
			self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
							[speed_index][1] * 2;
			self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
							[speed_index][1] * 2;

			itr_tx |= hw_atl_b0_timers_table_tx_
						[speed_index][0] << 0x8U;
			itr_tx |= hw_atl_b0_timers_table_tx_
						[speed_index][1] << 0x10U;

			itr_rx |= hw_atl_b0_timers_table_rx_
						[speed_index][0] << 0x8U;
			itr_rx |= hw_atl_b0_timers_table_rx_
						[speed_index][1] << 0x10U;
		}
		break;
	case AQ_CFG_INTERRUPT_MODERATION_OFF:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
		itr_tx = 0U;
		itr_rx = 0U;
		break;
	}

	for (i = HW_ATL_B0_RINGS_MAX; i--;) {
		hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
		hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
	}

	return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
	int err;
	u32 val;

	hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);

	/* Invalidate Descriptor Cache to prevent writing to the cached
	 * descriptors and to the data pointer of those descriptors
	 */
	hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);

	err = aq_hw_err_from_flags(self);
	if (err)
		goto err_exit;

	readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
				  self, val, val == 1, 1000U, 10000U);

err_exit:
	return err;
}
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);

	return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);

	return aq_hw_err_from_flags(self);
}
#define get_ptp_ts_val_u64(self, indx) \
	((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))

static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp)
{
	u64 ns;

	hw_atl_pcs_ptp_clock_read_enable(self, 1);
	hw_atl_pcs_ptp_clock_read_enable(self, 0);
	ns = (get_ptp_ts_val_u64(self, 0) +
	      (get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC +
	     (get_ptp_ts_val_u64(self, 3) +
	      (get_ptp_ts_val_u64(self, 4) << 16));

	*stamp = ns + self->ptp_clk_offset;
}
static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
{
	/* For accuracy, the intermediate values are kept in extended precision */
	s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC);
	u64 nsi_frac = 0;
	u64 nsi;

	base_ns = div64_s64(base_ns, freq);
	nsi = div64_u64(base_ns, NSEC_PER_SEC);

	if (base_ns != nsi * NSEC_PER_SEC) {
		s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
					base_ns - nsi * NSEC_PER_SEC);
		nsi_frac = div64_s64(AQ_FRAC_PER_NS * NSEC_PER_SEC, divisor);
	}

	*ns = (u32)nsi;
	*fns = (u32)nsi_frac;
}
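/* Illustrative numbers for the conversion above (assuming, purely for
 * the example, a 500 MHz counter): the nominal tick period is 2 ns, and
 * with adj = +1000 ppb it becomes (1e9 + 1000) / 5e8 = 2.000002 ns, so
 * *ns is 2 and *fns carries the 0.000002 ns remainder expressed in
 * 1/AQ_FRAC_PER_NS units.
 */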
static void
hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
			     u64 phyfreq, u64 macfreq)
{
	s64 adj_fns_val;
	s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
					AQ_FRAC_PER_NS * ptp_adj_freq->ns_phy);
	s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
					AQ_FRAC_PER_NS * ptp_adj_freq->ns_mac);
	s64 fault_in_sec_phy = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
	s64 fault_in_sec_mac = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
	/* MAC MCP counter freq is macfreq / 4 */
	s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
				   4 * AQ_FRAC_PER_NS;

	diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
					 AQ_HW_MAC_COUNTER_HZ);
	adj_fns_val = (ptp_adj_freq->fns_mac + AQ_FRAC_PER_NS *
		       ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;

	ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, AQ_FRAC_PER_NS);
	ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
				    AQ_FRAC_PER_NS;
}
static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
{
	self->ptp_clk_offset += delta;

	self->aq_fw_ops->adjust_ptp(self, self->ptp_clk_offset);

	return 0;
}

static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
{
	s64 delta = time - (self->ptp_clk_offset + ts);

	return hw_atl_b0_adj_sys_clock(self, delta);
}

static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
{
	*time = self->ptp_clk_offset + ts;
	return 0;
}
static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
{
	struct hw_fw_request_iface fwreq;
	size_t size;

	memset(&fwreq, 0, sizeof(fwreq));

	fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ;
	hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb,
				 &fwreq.ptp_adj_freq.ns_mac,
				 &fwreq.ptp_adj_freq.fns_mac);
	hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb,
				 &fwreq.ptp_adj_freq.ns_phy,
				 &fwreq.ptp_adj_freq.fns_phy);
	hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq,
				     AQ_HW_PHY_COUNTER_HZ,
				     AQ_HW_MAC_COUNTER_HZ);

	size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq);
	return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
}
static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
				u64 start, u32 period)
{
	struct hw_fw_request_iface fwreq;
	size_t size;

	memset(&fwreq, 0, sizeof(fwreq));

	fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL;
	fwreq.ptp_gpio_ctrl.index = index;
	fwreq.ptp_gpio_ctrl.period = period;
	/* Apply time offset */
	fwreq.ptp_gpio_ctrl.start = start;

	size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
	return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
}
static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index,
				       u32 enable)
{
	/* Enable/disable Sync1588 GPIO Timestamping */
	aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0);

	return 0;
}
static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts)
{
	u64 sec_l;
	u64 sec_h;
	u64 nsec_l;
	u64 nsec_h;

	if (!ts)
		return -1;

	/* PTP external GPIO clock seconds count 15:0 */
	sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914);
	/* PTP external GPIO clock seconds count 31:16 */
	sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915);
	/* PTP external GPIO clock nanoseconds count 15:0 */
	nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916);
	/* PTP external GPIO clock nanoseconds count 31:16 */
	nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917);

	*ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC;

	return 0;
}
static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
				   unsigned int len, u64 *timestamp)
{
	unsigned int offset = 14;
	struct ethhdr *eth;
	__be64 sec;
	__be32 ns;
	u8 *ptr;

	if (len <= offset || !timestamp)
		return 0;

	/* The TIMESTAMP at the end of the packet has the following
	 * format (big-endian):
	 *   struct {
	 *     uint64_t sec;
	 *     uint32_t ns;
	 *     uint16_t stream_id;
	 *   };
	 */
	ptr = p + (len - offset);
	memcpy(&sec, ptr, sizeof(sec));
	ptr += sizeof(sec);
	memcpy(&ns, ptr, sizeof(ns));

	*timestamp = (be64_to_cpu(sec) & 0xffffffffffffllu) * NSEC_PER_SEC +
		     be32_to_cpu(ns) + self->ptp_clk_offset;

	eth = (struct ethhdr *)p;

	return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
}
static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
				  u64 *timestamp)
{
	struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
	u64 tmp, sec, ns;

	sec = 0;
	tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
	sec += tmp;
	tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
	sec += tmp;
	tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
	sec += tmp;
	tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
	sec += tmp;
	ns = sec * NSEC_PER_SEC + hwts_wb->ns;
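	/* The shifts above assemble a 48-bit seconds value scattered
	 * across the write-back words: sec[9:0] from sec_lw0[11:2],
	 * sec[25:10] from sec_lw1[31:16], sec[37:26] from sec_hw[11:0]
	 * and sec[47:38] from sec_hw[31:22].
	 */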
	*timestamp = ns + self->ptp_clk_offset;

	return 0;
}
static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
				    struct aq_rx_filter_l3l4 *data)
{
	u8 location = data->location;

	if (!data->is_ipv6) {
		hw_atl_rpfl3l4_cmd_clear(self, location);
		hw_atl_rpf_l4_spd_set(self, 0U, location);
		hw_atl_rpf_l4_dpd_set(self, 0U, location);
		hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
		hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
	} else {
		int i;

		for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
			hw_atl_rpfl3l4_cmd_clear(self, location + i);
			hw_atl_rpf_l4_spd_set(self, 0U, location + i);
			hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
		}
		hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
		hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
	}

	return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
				  struct aq_rx_filter_l3l4 *data)
{
	u8 location = data->location;

	hw_atl_b0_hw_fl3l4_clear(self, data);

	if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 |
			 HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) {
		if (!data->is_ipv6) {
			hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
							  location,
							  data->ip_dst[0]);
			hw_atl_rpfl3l4_ipv4_src_addr_set(self,
							 location,
							 data->ip_src[0]);
		} else {
			hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
							  location,
							  data->ip_dst);
			hw_atl_rpfl3l4_ipv6_src_addr_set(self,
							 location,
							 data->ip_src);
		}
	}

	if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
			 HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) {
		hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
		hw_atl_rpf_l4_spd_set(self, data->p_src, location);
	}

	hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
				struct aq_rx_filter_l2 *data)
{
	hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
	hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
	hw_atl_rpf_etht_user_priority_en_set(self,
					     !!data->user_priority_en,
					     data->location);
	if (data->user_priority_en)
		hw_atl_rpf_etht_user_priority_set(self,
						  data->user_priority,
						  data->location);

	if (data->queue < 0) {
		hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
		hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
	} else {
		hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
		hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
		hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
				  struct aq_rx_filter_l2 *data)
{
	hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
	hw_atl_rpf_etht_flr_set(self, 0U, data->location);
	hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);

	return aq_hw_err_from_flags(self);
}
/**
 * @brief Set VLAN filter table
 * @details Configure the VLAN filter table to accept (and assign a queue to)
 *  traffic for particular VLAN IDs.
 * Note: use this function under VLAN promisc mode so as not to lose traffic.
 *
 * @param aq_hw_s
 * @param aq_rx_filter_vlan VLAN filter configuration
 * @return 0 - OK, <0 - error
 */
static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
				 struct aq_rx_filter_vlan *aq_vlans)
{
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
		hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
		hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
		if (aq_vlans[i].enable) {
			hw_atl_rpf_vlan_id_flr_set(self,
						   aq_vlans[i].vlan_id,
						   i);
			hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
			hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
			if (aq_vlans[i].queue != 0xFF) {
				hw_atl_rpf_vlan_rxq_flr_set(self,
							    aq_vlans[i].queue,
							    i);
				hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
			}
		}
	}

	return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
	/* Set promisc mode when disabling the VLAN filter */
	hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);

	return aq_hw_err_from_flags(self);
}
int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
{
	switch (mode) {
	case AQ_HW_LOOPBACK_DMA_SYS:
		hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable);
		hw_atl_rpb_dma_sys_lbk_set(self, enable);
		break;
	case AQ_HW_LOOPBACK_PKT_SYS:
		hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable);
		hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable);
		break;
	case AQ_HW_LOOPBACK_DMA_NET:
		hw_atl_rpf_vlan_prom_mode_en_set(self, enable);
		hw_atl_rpfl2promiscuous_mode_en_set(self, enable);
		hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable);
		hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable);
		hw_atl_rpb_dma_net_lbk_set(self, enable);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static u32 hw_atl_b0_ts_ready_and_latch_high_get(struct aq_hw_s *self)
{
	if (hw_atl_ts_ready_get(self) && hw_atl_ts_ready_latch_high_get(self))
		return 1;

	return 0;
}
static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp)
{
	bool ts_disabled;
	int err;
	u32 val;
	u32 ts;

	ts_disabled = (hw_atl_ts_power_down_get(self) == 1U);

	if (ts_disabled) {
		// Set AFE Temperature Sensor to on (off by default)
		hw_atl_ts_power_down_set(self, 0U);

		// Reset internal capacitors, biasing, and counters
		hw_atl_ts_reset_set(self, 1);
		hw_atl_ts_reset_set(self, 0);
	}

	err = readx_poll_timeout_atomic(hw_atl_b0_ts_ready_and_latch_high_get,
					self, val, val == 1, 10000U, 500000U);
	if (err)
		return err;

	ts = hw_atl_ts_data_get(self);
	*temp = ts * ts * 16 / 100000 + 60 * ts - 83410;
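	/* The raw reading is converted by the quadratic fit above;
	 * judging by the constants the result is in hundredths of a
	 * degree Celsius, e.g. ts = 1500 gives
	 * 1500 * 1500 * 16 / 100000 + 60 * 1500 - 83410 =
	 * 360 + 90000 - 83410 = 6950, i.e. 69.5 degrees (an inference
	 * from the formula, not from a datasheet).
	 */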
	if (ts_disabled) {
		// Set AFE Temperature Sensor back to off
		hw_atl_ts_power_down_set(self, 1U);
	}

	return 0;
}
const struct aq_hw_ops hw_atl_ops_b0 = {
	.hw_soft_reset = hw_atl_utils_soft_reset,
	.hw_prepare = hw_atl_utils_initfw,
	.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
	.hw_init = hw_atl_b0_hw_init,
	.hw_reset = hw_atl_b0_hw_reset,
	.hw_start = hw_atl_b0_hw_start,
	.hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop = hw_atl_b0_hw_stop,

	.hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,

	.hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,

	.hw_irq_enable = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable = hw_atl_b0_hw_irq_disable,
	.hw_irq_read = hw_atl_b0_hw_irq_read,

	.hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
	.hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
	.hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
	.hw_filter_l2_set = hw_atl_b0_hw_fl2_set,
	.hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear,
	.hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set,
	.hw_filter_vlan_set = hw_atl_b0_hw_vlan_set,
	.hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl,
	.hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
	.hw_rss_set = hw_atl_b0_hw_rss_set,
	.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
	.hw_tc_rate_limit_set = hw_atl_b0_hw_init_tx_tc_rate_limit,
	.hw_get_regs = hw_atl_utils_hw_get_regs,
	.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version = hw_atl_utils_get_fw_version,

	.hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
	.hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,

	.hw_get_ptp_ts = hw_atl_b0_get_ptp_ts,
	.hw_adj_sys_clock = hw_atl_b0_adj_sys_clock,
	.hw_set_sys_clock = hw_atl_b0_set_sys_clock,
	.hw_ts_to_sys_clock = hw_atl_b0_ts_to_sys_clock,
	.hw_adj_clock_freq = hw_atl_b0_adj_clock_freq,
	.hw_gpio_pulse = hw_atl_b0_gpio_pulse,
	.hw_extts_gpio_enable = hw_atl_b0_extts_gpio_enable,
	.hw_get_sync_ts = hw_atl_b0_get_sync_ts,
	.rx_extract_ts = hw_atl_b0_rx_extract_ts,
	.extract_hwts = hw_atl_b0_extract_hwts,
	.hw_set_offload = hw_atl_b0_hw_offload_set,
	.hw_set_loopback = hw_atl_b0_set_loopback,
	.hw_set_fc = hw_atl_b0_set_fc,

	.hw_get_mac_temp = hw_atl_b0_get_mac_temp,
};