// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause

/* Packet receive logic for Mellanox Gigabit Ethernet driver
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"

void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 dmac)
{
	void __iomem *base = priv->base;
	u64 control;

	/* Write destination MAC to specified MAC RX filter */
	writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
	       (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));

	/* Enable MAC receive filter mask for specified index */
	control = readq(base + MLXBF_GIGE_CONTROL);
	control |= (MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
	writeq(control, base + MLXBF_GIGE_CONTROL);
}

void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 *dmac)
{
	void __iomem *base = priv->base;

	/* Read destination MAC from specified MAC RX filter */
	*dmac = readq(base + MLXBF_GIGE_RX_MAC_FILTER +
		      (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
}

void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv)
{
	void __iomem *base = priv->base;
	u64 control;
	u64 end_mac;

	/* Enable MAC_ID_RANGE match functionality */
	control = readq(base + MLXBF_GIGE_CONTROL);
	control |= MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
	writeq(control, base + MLXBF_GIGE_CONTROL);

	/* Set start of destination MAC range check to 0 */
	writeq(0, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START);

	/* Set end of destination MAC range check to all FFs */
	end_mac = BCAST_MAC_ADDR;
	writeq(end_mac, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END);
}

void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv)
{
	void __iomem *base = priv->base;
	u64 control;

	/* Disable MAC_ID_RANGE match functionality */
	control = readq(base + MLXBF_GIGE_CONTROL);
	control &= ~MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
	writeq(control, base + MLXBF_GIGE_CONTROL);

	/* NOTE: no need to change DMAC_RANGE_START or END;
	 * those values are ignored since MAC_ID_RANGE_EN=0
	 */
}

/* Receive Initialization
 * 1) Configures RX MAC filters via MMIO registers
 * 2) Allocates RX WQE array using coherent DMA mapping
 * 3) Initializes each element of RX WQE array with a receive
 *    buffer pointer (also using coherent DMA mapping)
 * 4) Allocates RX CQE array using coherent DMA mapping
 * 5) Completes other misc receive initialization
 */
int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
{
	size_t wq_size, cq_size;
	dma_addr_t *rx_wqe_ptr;
	dma_addr_t rx_buf_dma;
	u64 data;
	int i, j;

	/* Configure MAC RX filter #0 to allow RX of broadcast pkts */
	mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX,
				     BCAST_MAC_ADDR);

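	/* Allocate the RX WQE array; per the note below, each WQE is
	 * just the DMA address of one receive buffer
	 */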
	wq_size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
	priv->rx_wqe_base = dma_alloc_coherent(priv->dev, wq_size,
					       &priv->rx_wqe_base_dma,
					       GFP_KERNEL);
	if (!priv->rx_wqe_base)
		return -ENOMEM;

	/* Initialize 'rx_wqe_ptr' to point to first RX WQE in array
	 * Each RX WQE is simply a receive buffer pointer, so walk
	 * the entire array, allocating a 2KB buffer for each element
	 */
	rx_wqe_ptr = priv->rx_wqe_base;

	for (i = 0; i < priv->rx_q_entries; i++) {
		priv->rx_skb[i] = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
						       &rx_buf_dma, DMA_FROM_DEVICE);
		if (!priv->rx_skb[i])
			goto free_wqe_and_skb;
		*rx_wqe_ptr++ = rx_buf_dma;
	}

	/* Write RX WQE base address into MMIO reg */
	writeq(priv->rx_wqe_base_dma, priv->base + MLXBF_GIGE_RX_WQ_BASE);

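	/* Allocate the RX CQE array; hardware writes one completion
	 * entry per received packet
	 */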
	cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
	priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,
					       &priv->rx_cqe_base_dma,
					       GFP_KERNEL);
	if (!priv->rx_cqe_base)
		goto free_wqe_and_skb;

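	/* Assumption: priv->valid_polarity starts at zero, so presetting
	 * the valid bit marks every CQE as "not yet written"; a completion
	 * is only recognized once hardware rewrites the entry (see
	 * mlxbf_gige_rx_packet())
	 */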
	for (i = 0; i < priv->rx_q_entries; i++)
		priv->rx_cqe_base[i] |= MLXBF_GIGE_RX_CQE_VALID_MASK;

	/* Write RX CQE base address into MMIO reg */
	writeq(priv->rx_cqe_base_dma, priv->base + MLXBF_GIGE_RX_CQ_BASE);

	/* Write RX_WQE_PI with current number of replenished buffers */
	writeq(priv->rx_q_entries, priv->base + MLXBF_GIGE_RX_WQE_PI);

	/* Enable removal of CRC during RX */
	data = readq(priv->base + MLXBF_GIGE_RX);
	data |= MLXBF_GIGE_RX_STRIP_CRC_EN;
	writeq(data, priv->base + MLXBF_GIGE_RX);

	/* Enable RX MAC filter pass and discard counters */
	writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN,
	       priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC);
	writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
	       priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);

	/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
	 * indicate readiness to receive interrupts
	 */
	data = readq(priv->base + MLXBF_GIGE_INT_MASK);
	data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
	writeq(data, priv->base + MLXBF_GIGE_INT_MASK);

	/* Enable RX DMA to write new packets to memory */
	data = readq(priv->base + MLXBF_GIGE_RX_DMA);
	data |= MLXBF_GIGE_RX_DMA_EN;
	writeq(data, priv->base + MLXBF_GIGE_RX_DMA);

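	/* Write log2 of the RX queue depth (number of WQEs) into MMIO reg */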
	writeq(ilog2(priv->rx_q_entries),
	       priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);

	return 0;

free_wqe_and_skb:
	/* Unwind: unmap and free only the buffers allocated so far */
	rx_wqe_ptr = priv->rx_wqe_base;
	for (j = 0; j < i; j++) {
		dma_unmap_single(priv->dev, *rx_wqe_ptr,
				 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(priv->rx_skb[j]);
		rx_wqe_ptr++;
	}
	dma_free_coherent(priv->dev, wq_size,
			  priv->rx_wqe_base, priv->rx_wqe_base_dma);
	return -ENOMEM;
}

/* Receive Deinitialization
 * This routine will free allocations done by mlxbf_gige_rx_init(),
 * namely the RX WQE and RX CQE arrays, as well as all RX buffers
 */
void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv)
{
	dma_addr_t *rx_wqe_ptr;
	size_t size;
	u64 data;
	int i;

	/* Disable RX DMA to prevent packet transfers to memory */
	data = readq(priv->base + MLXBF_GIGE_RX_DMA);
	data &= ~MLXBF_GIGE_RX_DMA_EN;
	writeq(data, priv->base + MLXBF_GIGE_RX_DMA);

	rx_wqe_ptr = priv->rx_wqe_base;

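	/* Unmap and free every receive buffer referenced by the WQE array */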
	for (i = 0; i < priv->rx_q_entries; i++) {
		dma_unmap_single(priv->dev, *rx_wqe_ptr, MLXBF_GIGE_DEFAULT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(priv->rx_skb[i]);
		rx_wqe_ptr++;
	}

	size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
	dma_free_coherent(priv->dev, size,
			  priv->rx_wqe_base, priv->rx_wqe_base_dma);

	size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
	dma_free_coherent(priv->dev, size,
			  priv->rx_cqe_base, priv->rx_cqe_base_dma);

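	/* Clear saved addresses and base registers so that a subsequent
	 * mlxbf_gige_rx_init() starts from a clean state
	 */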
	priv->rx_wqe_base = NULL;
	priv->rx_wqe_base_dma = 0;
	priv->rx_cqe_base = NULL;
	priv->rx_cqe_base_dma = 0;
	writeq(0, priv->base + MLXBF_GIGE_RX_WQ_BASE);
	writeq(0, priv->base + MLXBF_GIGE_RX_CQ_BASE);
}

static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
{
	struct net_device *netdev = priv->netdev;
	struct sk_buff *skb = NULL, *rx_skb;
	u16 rx_pi_rem, rx_ci_rem;
	dma_addr_t *rx_wqe_addr;
	dma_addr_t rx_buf_dma;
	u64 *rx_cqe_addr;
	u64 datalen;
	u64 rx_cqe;
	u16 rx_ci;
	u16 rx_pi;

	/* Index into RX buffer array is rx_pi w/wrap based on RX_CQE_SIZE */
	rx_pi = readq(priv->base + MLXBF_GIGE_RX_WQE_PI);
	rx_pi_rem = rx_pi % priv->rx_q_entries;

	rx_wqe_addr = priv->rx_wqe_base + rx_pi_rem;
	rx_cqe_addr = priv->rx_cqe_base + rx_pi_rem;
	rx_cqe = *rx_cqe_addr;

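	/* Hardware appears to toggle the CQE valid bit on each pass through
	 * the ring (the polarity flips below whenever rx_pi wraps); a bit
	 * that does not match priv->valid_polarity means this entry has no
	 * new completion yet
	 */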
	if ((!!(rx_cqe & MLXBF_GIGE_RX_CQE_VALID_MASK)) != priv->valid_polarity)
		return false;

	if ((rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK) == 0) {
		/* Packet is OK, increment stats */
		datalen = rx_cqe & MLXBF_GIGE_RX_CQE_PKT_LEN_MASK;
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += datalen;

		skb = priv->rx_skb[rx_pi_rem];

		/* Alloc another RX SKB for this same index */
		rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
					      &rx_buf_dma, DMA_FROM_DEVICE);
		if (!rx_skb)
			return false;
		priv->rx_skb[rx_pi_rem] = rx_skb;
		dma_unmap_single(priv->dev, *rx_wqe_addr,
				 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);

		skb_put(skb, datalen);

		skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */

		skb->protocol = eth_type_trans(skb, netdev);

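		/* Hand the new buffer's DMA address to hardware via the WQE */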
		*rx_wqe_addr = rx_buf_dma;
	} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
		priv->stats.rx_mac_errors++;
	} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED) {
		priv->stats.rx_truncate_errors++;
	}

	/* Read receive consumer index before replenish so that this routine
	 * returns accurate return value even if packet is received into
	 * just-replenished buffer prior to exiting this routine.
	 */
	rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
	rx_ci_rem = rx_ci % priv->rx_q_entries;

	/* Let hardware know we've replenished one buffer */
	rx_pi++;

	/* Ensure completion of all writes before notifying HW of replenish */
	wmb();
	writeq(rx_pi, priv->base + MLXBF_GIGE_RX_WQE_PI);

	(*rx_pkts)++;

	rx_pi_rem = rx_pi % priv->rx_q_entries;
	if (rx_pi_rem == 0)
		priv->valid_polarity ^= 1;

	if (skb)
		netif_receive_skb(skb);

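	/* Completions remain if our producer index (buffers replenished)
	 * has not caught up with the hardware consumer index read earlier
	 */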
	return rx_pi_rem != rx_ci_rem;
}

/* Driver poll() function called by NAPI infrastructure */
int mlxbf_gige_poll(struct napi_struct *napi, int budget)
{
	struct mlxbf_gige *priv;
	bool remaining_pkts;
	int work_done = 0;
	u64 data;

	priv = container_of(napi, struct mlxbf_gige, napi);

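	/* Reclaim any completed TX work before processing RX packets */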
	mlxbf_gige_handle_tx_complete(priv);

	do {
		remaining_pkts = mlxbf_gige_rx_packet(priv, &work_done);
	} while (remaining_pkts && work_done < budget);

	/* If amount of work done < budget, turn off NAPI polling
	 * via napi_complete_done(napi, work_done) and then
	 * re-enable interrupts.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
		 * indicate receive readiness
		 */
		data = readq(priv->base + MLXBF_GIGE_INT_MASK);
		data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
		writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
	}

	return work_done;
}