/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"

#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
	.vecs = HW_ATL_B0_RSS_MAX, \
	.tcs = HW_ATL_B0_TC_MAX, \
	.rxd_alignment = 1U, \
	.rxd_size = HW_ATL_B0_RXD_SIZE, \
	.txd_alignment = 1U, \
	.txd_size = HW_ATL_B0_TXD_SIZE, \
	.txhwb_alignment = 4096U, \
	.tx_rings = HW_ATL_B0_TX_RINGS, \
	.rx_rings = HW_ATL_B0_RX_RINGS, \
	.hw_features = NETIF_F_HW_CSUM | \
	.hw_priv_flags = IFF_UNICAST_FLT, \
	.flow_control = true, \
	.mtu = HW_ATL_B0_MTU_JUMBO, \
	.mac_regs_count = 88, \
	.hw_alive_check_addr = 0x10U
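
/* Per-board capability tables: each AQC part shares
 * DEFAULT_B0_BOARD_BASIC_CAPABILITIES above and overrides only the media
 * type and the supported link-speed mask.
 */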
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
	.link_speed_msk = HW_ATL_B0_RATE_10G |

const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = HW_ATL_B0_RATE_10G |

const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = HW_ATL_B0_RATE_5G |

const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = HW_ATL_B0_RATE_2G5 |

static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
	err = hw_atl_utils_soft_reset(self);

	self->aq_fw_ops->set_state(self, MPI_RESET);

	err = aq_hw_err_from_flags(self);
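
/* Program the TX/RX packet schedulers, per-TC buffer sizes and XOFF
 * thresholds, and map all 802.1p priorities to TC 0.
 */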
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
	unsigned int i_priority = 0U;
	bool is_rx_flow_control = false;

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

	buff_size = HW_ATL_B0_TXBUF_MAX;

	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
			(1024 / 32U) * 66U) /
	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
			(1024 / 32U) * 50U) /

	/* QoS Rx buf size per TC */
	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
	buff_size = HW_ATL_B0_RXBUF_MAX;

	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
			(1024U / 32U) * 66U) /
	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
			(1024U / 32U) * 50U) /
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);

	/* QoS 802.1p priority -> TC mapping */
	for (i_priority = 8U; i_priority--;)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

	return aq_hw_err_from_flags(self);
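
/* Write the RSS hash secret key into the RPF key registers, byte-swapping
 * each 32-bit word, and wait for every write to be accepted by hardware.
 */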
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
				     struct aq_rss_parameters *rss_params)
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	unsigned int addr = 0U;

	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = cfg->is_rss ?
			__swab32(rss_params->hash_secret_key[i]) : 0U;
		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
		hw_atl_rpf_rss_key_addr_set(self, addr);
		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,

	err = aq_hw_err_from_flags(self);
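
/* Pack the RSS indirection table into 3-bit queue indices and write it to
 * the redirection table registers 16 bits at a time.
 */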
static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
				struct aq_rss_parameters *rss_params)
	u8 *indirection_table = rss_params->indirection_table;
	u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
	u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX *
		    HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];

	memset(bitary, 0, sizeof(bitary));

	for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
		(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
			((indirection_table[i] % num_rss_queues) <<

	for (i = ARRAY_SIZE(bitary); i--;) {
		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
		hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,

	err = aq_hw_err_from_flags(self);
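
/* Enable TX/RX checksum offloads, large send offload and, when requested
 * by the configuration, large receive offload (LRO) with its coalescing
 * limits.
 */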
static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
	/* TX checksum offloads */
	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksum offloads */
	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);

	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

		unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
			((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
			((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

		for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
			hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);

		hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
		hw_atl_rpo_lro_inactive_interval_set(self, 0);
		hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);

		hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

		hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

		hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);

		hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

		hw_atl_rpo_lro_pkt_lim_set(self, 1U);

		hw_atl_rpo_lro_en_set(self,
				      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);

	return aq_hw_err_from_flags(self);
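
/* One-time TX datapath setup: LSO TCP flag masks, descriptor write-back
 * interrupts, the TPO2 chip-feature toggle and frame padding; TX DCA is
 * left disabled.
 */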
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
			0x00010000U : 0x00000000U);
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);

	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

	return aq_hw_err_from_flags(self);
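
/* One-time RX datapath setup: TC/RSS mode, flow control, RSS ring
 * selection, L2 unicast/multicast/broadcast filters and VLAN filtering
 * defaults; RX DCA is left disabled.
 */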
static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	/* RSS Ring selection */
	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
					  0xB3333333U : 0x00000000U);

	/* Multicast filters */
	for (i = HW_ATL_B0_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

	hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
	hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
	hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);

	hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
	hw_atl_rpf_vlan_untagged_act_set(self, 1U);

	hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
	hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
	hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);

	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	aq_hw_write_reg(self, 0x00005040U,
			IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);

	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
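
/* Program the station MAC address into L2 unicast filter slot
 * HW_ATL_B0_MAC and re-enable that filter.
 */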
static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) | mac_addr[5];

	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

	err = aq_hw_err_from_flags(self);
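
/* Full hardware bring-up: datapath init, MAC address, link speed and MPI
 * state, QoS/RSS/offload configuration, PCIe request limits and interrupt
 * routing for the selected IRQ mode.
 */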
static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
	static u32 aq_hw_atl_igcr_table_[4][2] = {
		{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
		{ 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
		{ 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
		{ 0x20000022U, 0x20000026U }  /* AQ_IRQ_MSIX */

	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;

	hw_atl_b0_hw_init_tx_path(self);
	hw_atl_b0_hw_init_rx_path(self);

	hw_atl_b0_hw_mac_addr_set(self, mac_addr);

	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
	self->aq_fw_ops->set_state(self, MPI_INIT);

	hw_atl_b0_hw_qos_set(self);
	hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	/* Force limit MRRS on RDM/TDM to 2K */
	val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
	aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
			(val & ~0x707) | 0x404);

	/* TX DMA total request limit. B0 hardware is not capable of
	 * handling more than (8K - MRRS) bytes of in-flight DMA data.
	 * The value 24 is in 256-byte units: 24 * 256 = 6K = 8K - 2K MRRS.
	 */
	aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	self->aq_fw_ops->update_stats(self);

	err = aq_hw_err_from_flags(self);

	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
				   [(aq_nic_cfg->vecs > 1U) ?

	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	hw_atl_reg_gen_irq_map_set(self,
				   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
				   ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);

	hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
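
/* Per-ring and per-buffer enable helpers, plus the TX tail (doorbell)
 * pointer update.
 */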
static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_start(struct aq_hw_s *self)
	hw_atl_tpb_tx_buff_en_set(self, 1);
	hw_atl_rpb_rx_buff_en_set(self, 1);
	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
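
/* Build TX descriptors for a frame: an optional context descriptor for
 * LSO, then one data descriptor per fragment with checksum offload flags;
 * EOP/write-back are set on the last fragment and the tail is advanced.
 */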
static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
	struct aq_ring_buff_s *buff = NULL;
	struct hw_atl_txd_s *txd = NULL;
	unsigned int buff_pa_len = 0U;
	unsigned int pkt_len = 0U;
	unsigned int frag_count = 0U;

	buff = &ring->buff_ring[ring->sw_tail];
	pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

	for (frag_count = 0; frag_count < frags; frag_count++) {
		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *

		buff = &ring->buff_ring[ring->sw_tail];

			txd->ctl |= (buff->len_l3 << 31) |
				    (buff->len_l2 << 24) |
				    HW_ATL_B0_TXD_CTL_CMD_TCP |
				    HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl2 |= (buff->mss << 16) |
				     (buff->len_l4 << 8) |

			pkt_len -= (buff->len_l4 +

				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;

			buff_pa_len = buff->len;

			txd->buf_addr = buff->pa;
			txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
				     ((u32)buff_pa_len << 4));
			txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;

			txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
				txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;

			/* Tx checksum offloads */
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

			if (buff->is_udp_cso || buff->is_tcp_cso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

			if (unlikely(buff->is_eop)) {
				txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;

		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);

	hw_atl_b0_hw_tx_ring_tail_update(self, ring);
	return aq_hw_err_from_flags(self);
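
/* Configure an RX ring: descriptor base address and length, data buffer
 * size, header splitting and VLAN stripping off, and interrupt vector/CPU
 * mapping; RX DCA is left disabled.
 */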
static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,

	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
						  dma_desc_addr_msw, aq_ring->idx);

	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_rdm_rx_desc_data_buff_size_set(self,
					      AQ_CFG_RX_FRAME_MAX / 1024U,

	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
	hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

	/* Rx ring set mode */

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,

	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,

	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

	/* Set Tx threshold */
	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
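
/* Attach DMA buffer addresses to the RX descriptors refilled by the caller
 * and publish them by writing the new tail pointer.
 */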
static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int sw_tail_old)
	for (; sw_tail_old != ring->sw_tail;
	     sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *

		struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

		rxd->buf_addr = buff->pa;

	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
	unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);

	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {

	ring->hw_head = hw_head_;
	err = aq_hw_err_from_flags(self);
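
/* Walk completed RX write-back descriptors: check error and checksum
 * status, record the RSS hash, compute buffer lengths and handle
 * multi-descriptor (LRO/jumbo) packets.
 */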
static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
					struct aq_ring_s *ring)
	struct device *ndev = aq_nic_get_dev(ring->aq_nic);

	for (; ring->hw_head != ring->sw_tail;
	     ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
		struct aq_ring_buff_s *buff = NULL;
		struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
			&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

		unsigned int is_err = 1U;
		unsigned int is_rx_check_sum_enabled = 0U;
		unsigned int pkt_type = 0U;

		if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */

		buff = &ring->buff_ring[ring->hw_head];

		is_err = (0x0000003CU & rxd_wb->status);

		is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
		is_err &= ~0x20U; /* exclude validity bit */

		pkt_type = 0xFFU & (rxd_wb->type >> 4);

		if (is_rx_check_sum_enabled) {
			if (0x0U == (pkt_type & 0x3U))
				buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;

			if (0x4U == (pkt_type & 0x1CU))
				buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
			else if (0x0U == (pkt_type & 0x1CU))
				buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;

			/* Checksum offload workaround for small packets */
			if (rxd_wb->pkt_len <= 60) {
				buff->is_ip_cso = 0U;
				buff->is_cso_err = 0U;

		dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);

		if (is_err || rxd_wb->type & 0x1000U) {
			/* status error or DMA error */

		if (self->aq_nic_cfg->is_rss) {
			u16 rss_type = rxd_wb->type & 0xFU;

			if (rss_type && rss_type < 0x8U) {
				buff->is_hash_l4 = (rss_type == 0x4 ||
				buff->rss_hash = rxd_wb->rss_hash;

		if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
			buff->len = rxd_wb->pkt_len %
			buff->len = buff->len ?
				    buff->len : AQ_CFG_RX_FRAME_MAX;

			if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
				buff->next = rxd_wb->next_desc_ptr;
				++ring->stats.rx.lro_packets;
					aq_ring_next_dx(ring,
				++ring->stats.rx.jumbo_packets;

	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

	atomic_inc(&self->dpc);
	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
	*mask = hw_atl_itr_irq_statuslsw_get(self);
	return aq_hw_err_from_flags(self);

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
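
/* Apply the interface packet filter flags (promiscuous, multicast,
 * all-multi, broadcast) to the L2 filters and enable only the unicast
 * filter slots that back the current multicast list.
 */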
static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
					  unsigned int packet_filter)
	hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
	hw_atl_rpfl2multicast_flr_en_set(self,
					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);

	hw_atl_rpfl2_accept_all_mc_packets_set(self,
					       IS_FILTER_ENABLED(IFF_ALLMULTI));

	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

	self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);

	for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
		hw_atl_rpfl2_uc_flr_en_set(self,
					   (self->aq_nic_cfg->is_mc_list_enabled &&
					    (i <= self->aq_nic_cfg->mc_list_count)) ?

	return aq_hw_err_from_flags(self);

#undef IS_FILTER_ENABLED
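
/* Load the multicast MAC list into consecutive L2 unicast filter slots
 * starting at HW_ATL_B0_MAC_MIN, enabling each entry only when the
 * multicast list is in use.
 */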
static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
					   [AQ_CFG_MULTICAST_ADDRESS_MAX]
	if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {

	for (self->aq_nic_cfg->mc_list_count = 0U;
	     self->aq_nic_cfg->mc_list_count < count;
	     ++self->aq_nic_cfg->mc_list_count) {
		u32 i = self->aq_nic_cfg->mc_list_count;
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addresslsw_set(self,
							l, HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addressmsw_set(self,
							h, HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2_uc_flr_en_set(self,
					   (self->aq_nic_cfg->is_mc_list_enabled),
					   HW_ATL_B0_MAC_MIN + i);

	err = aq_hw_err_from_flags(self);
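
/* Program interrupt moderation: fixed timers derived from the configured
 * ITR values, or link-speed based table defaults in AUTO mode; moderation
 * is bypassed entirely when it is turned off.
 */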
static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
	switch (self->aq_nic_cfg->itr) {
	case AQ_CFG_INTERRUPT_MODERATION_ON:
	case AQ_CFG_INTERRUPT_MODERATION_AUTO:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
			/* HW timers are in 2us units */
			int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
			int tx_min_timer = tx_max_timer / 2;

			int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
			int rx_min_timer = rx_max_timer / 2;

			tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
			tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
			rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
			rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);

			itr_tx |= tx_min_timer << 0x8U;
			itr_tx |= tx_max_timer << 0x10U;
			itr_rx |= rx_min_timer << 0x8U;
			itr_rx |= rx_max_timer << 0x10U;

			static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
				{0xfU, 0xffU},  /* 10Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit 5GS */
				{0xfU, 0x1ffU}, /* 2.5Gbit */
				{0xfU, 0x1ffU}, /* 1Gbit */
				{0xfU, 0x1ffU}, /* 100Mbit */

			static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
				{0x6U, 0x38U},  /* 10Gbit */
				{0xCU, 0x70U},  /* 5Gbit */
				{0xCU, 0x70U},  /* 5Gbit 5GS */
				{0x18U, 0xE0U}, /* 2.5Gbit */
				{0x30U, 0x80U}, /* 1Gbit */
				{0x4U, 0x50U},  /* 100Mbit */

			unsigned int speed_index =
				hw_atl_utils_mbps_2_speed_index(
					self->aq_link_status.mbps);

			/* Update user visible ITR settings */
			self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
						   [speed_index][1] * 2;
			self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
						   [speed_index][1] * 2;

			itr_tx |= hw_atl_b0_timers_table_tx_
				  [speed_index][0] << 0x8U;
			itr_tx |= hw_atl_b0_timers_table_tx_
				  [speed_index][1] << 0x10U;

			itr_rx |= hw_atl_b0_timers_table_rx_
				  [speed_index][0] << 0x8U;
			itr_rx |= hw_atl_b0_timers_table_rx_
				  [speed_index][1] << 0x10U;

	case AQ_CFG_INTERRUPT_MODERATION_OFF:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);

	for (i = HW_ATL_B0_RINGS_MAX; i--;) {
		hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
		hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);

	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
	hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
	return aq_hw_err_from_flags(self);

static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
	return aq_hw_err_from_flags(self);
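
/* Atlantic B0 hardware operations exported to the generic aq_nic layer. */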
const struct aq_hw_ops hw_atl_ops_b0 = {
	.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
	.hw_init = hw_atl_b0_hw_init,
	.hw_deinit = hw_atl_utils_hw_deinit,
	.hw_set_power = hw_atl_utils_hw_set_power,
	.hw_reset = hw_atl_b0_hw_reset,
	.hw_start = hw_atl_b0_hw_start,
	.hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop = hw_atl_b0_hw_stop,

	.hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,

	.hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,

	.hw_irq_enable = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable = hw_atl_b0_hw_irq_disable,
	.hw_irq_read = hw_atl_b0_hw_irq_read,

	.hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
	.hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
	.hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
	.hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
	.hw_rss_set = hw_atl_b0_hw_rss_set,
	.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
	.hw_get_regs = hw_atl_utils_hw_get_regs,
	.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version = hw_atl_utils_get_fw_version,