/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2019 Netronome Systems, Inc. */

#ifndef _NFP_NET_DP_
#define _NFP_NET_DP_

#include "nfp_net.h"

static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
	return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
				    dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				    dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
	dma_sync_single_for_device(dp->dev, dma_addr,
				   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				   dp->rx_dma_dir);
}

static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
					dma_addr_t dma_addr)
{
	dma_unmap_single_attrs(dp->dev, dma_addr,
			       dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
			       dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
					   dma_addr_t dma_addr,
					   unsigned int len)
{
	dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
				len, dp->rx_dma_dir);
}

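/*
 * Illustrative sketch, not part of the driver: how the four helpers above
 * pair up when posting an RX buffer. The helper name is a hypothetical
 * placeholder; the dma_mapping_error() failure check is the standard DMA
 * API pattern for streaming mappings.
 */
static inline int
nfp_net_dma_map_rx_example(struct nfp_net_dp *dp, void *frag,
			   dma_addr_t *dma_addr)
{
	/* Map only the data portion: headroom is skipped up front and the
	 * non-data overhead is excluded from the mapped length.
	 */
	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr))
		return -ENOMEM;

	/* The map skipped the CPU sync, so hand the buffer over to the
	 * device explicitly before giving it to the free list.
	 */
	nfp_net_dma_sync_dev_rx(dp, *dma_addr);
	return 0;
}
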
/**
 * nfp_net_tx_full() - check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of the read/write
 * pointers, whether a given TX ring is full. The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}

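/*
 * Worked example, illustrative only: with cnt == 1024, wr_p == 1030 and
 * rd_p == 10 there are 1020 descriptors in flight, so a packet needing
 * dcnt == 4 does not fit: 1030 - 10 >= 1024 - 4. Because wr_p and rd_p
 * are free-running unsigned counters, the subtraction also stays correct
 * after they wrap around.
 */
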
static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
	wmb(); /* drain writebuffer */
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
	tx_ring->wr_ptr_add = 0;
}

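/*
 * Illustrative sketch, not part of the driver: the flush helper above is
 * built for the xmit_more batching pattern. A hypothetical xmit path bumps
 * wr_ptr_add per descriptor written and only writes the queue controller
 * pointer back once the batch ends; netdev_xmit_more() is the standard
 * kernel helper signalling that more frames are queued behind this one.
 */
static inline void
nfp_net_tx_kick_example(struct nfp_net_tx_ring *tx_ring, unsigned int dcnt)
{
	tx_ring->wr_p += dcnt;
	tx_ring->wr_ptr_add += dcnt;
	if (!netdev_xmit_more())
		nfp_net_tx_xmit_more_flush(tx_ring);
}
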
static inline u32
nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
{
	if (tx_ring->txrwb)
		return *tx_ring->txrwb;
	return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
}

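/*
 * Illustrative note, not from the driver: the value read above is the
 * device's completion pointer, taken from the TX write-back area (txrwb)
 * when the FW provides one, otherwise from the queue controller. A reclaim
 * loop would typically compute newly completed work as
 *
 *	done = nfp_net_read_tx_cmpl(tx_ring, dp) - last_cmpl_seen;
 *
 * where last_cmpl_seen is a hypothetical cached copy of the previous read,
 * with unsigned wrap-around again keeping the difference correct.
 */
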
static inline void nfp_net_free_frag(void *frag, bool xdp)
{
	/* XDP buffers are full pages, the normal path uses page frags */
	if (!xdp)
		skb_free_frag(frag);
	else
		__free_page(virt_to_page(frag));
}

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn: NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
 */
static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
	nn_pci_flush(nn);
}

struct seq_file;

/* Common */
void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx);
void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx);
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);

void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);

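/*
 * Illustrative sketch, not part of the driver: the rings_prepare/rings_free
 * declarations above pair up, so a hypothetical setup path unwinds in
 * reverse order on failure.
 */
static inline int
nfp_net_rings_prepare_example(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	int err;

	err = nfp_net_rx_rings_prepare(nn, dp);
	if (err)
		return err;

	err = nfp_net_tx_rings_prepare(nn, dp);
	if (err)
		nfp_net_rx_rings_free(dp);

	return err;
}
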
enum nfp_nfd_version {
	NFP_NFD_VER_NFD3,
	NFP_NFD_VER_NFDK,
};

/**
 * struct nfp_dp_ops - Hooks wrapping the different data path implementations
 * @version: Indicates the dp type
 * @tx_min_desc_per_pkt: Minimum number of TX descs needed per packet
 * @poll: NAPI poll for normal rx/tx
 * @xsk_poll: NAPI poll when xsk is enabled
 * @ctrl_poll: Tasklet poll for ctrl rx/tx
 * @xmit: Xmit for the normal path
 * @ctrl_tx_one: Xmit for the ctrl path
 * @rx_ring_fill_freelist: Give buffers from the ring to the FW
 * @tx_ring_alloc: Allocate resources for a TX ring
 * @tx_ring_reset: Free any untransmitted buffers and reset pointers
 * @tx_ring_free: Free resources allocated to a TX ring
 * @tx_ring_bufs_alloc: Allocate resources for each TX buffer
 * @tx_ring_bufs_free: Free resources allocated to each TX buffer
 * @print_tx_descs: Show TX ring's info for debug purposes
 */
struct nfp_dp_ops {
	enum nfp_nfd_version version;
	unsigned int tx_min_desc_per_pkt;

	int (*poll)(struct napi_struct *napi, int budget);
	int (*xsk_poll)(struct napi_struct *napi, int budget);
	void (*ctrl_poll)(struct tasklet_struct *t);
	netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
	bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
			    struct sk_buff *skb, bool old);
	void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
				      struct nfp_net_rx_ring *rx_ring);
	int (*tx_ring_alloc)(struct nfp_net_dp *dp,
			     struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_reset)(struct nfp_net_dp *dp,
			      struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
	int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
				  struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
				  struct nfp_net_tx_ring *tx_ring);

	void (*print_tx_descs)(struct seq_file *file,
			       struct nfp_net_r_vector *r_vec,
			       struct nfp_net_tx_ring *tx_ring,
			       u32 d_rd_p, u32 d_wr_p);
};

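/*
 * Illustrative sketch, not part of the driver: an ops table such as
 * nfp_nfd3_ops below is defined with a designated initializer. The handler
 * names here are hypothetical placeholders, kept under #if 0 so they are
 * never compiled.
 */
#if 0	/* example only */
const struct nfp_dp_ops my_example_dp_ops = {
	.version		= NFP_NFD_VER_NFD3,
	.tx_min_desc_per_pkt	= 1,
	.poll			= my_example_poll,
	.xmit			= my_example_xmit,
	/* remaining hooks are filled in the same way */
};
#endif
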
static inline void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_reset(dp, tx_ring);
}

static inline void
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
			      struct nfp_net_rx_ring *rx_ring)
{
	dp->ops->rx_ring_fill_freelist(dp, rx_ring);
}

static inline int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	return dp->ops->tx_ring_alloc(dp, tx_ring);
}

static inline void
nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_free(tx_ring);
}

static inline int
nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_tx_ring *tx_ring)
{
	return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
}

static inline void
nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_bufs_free(dp, tx_ring);
}

static inline void
nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
			       struct nfp_net_r_vector *r_vec,
			       struct nfp_net_tx_ring *tx_ring,
			       u32 d_rd_p, u32 d_wr_p)
{
	dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
}

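/*
 * Illustrative note, not from the driver: through the wrappers above the
 * core code stays NFD-version agnostic, e.g. a hypothetical ring
 * reallocation path only ever calls the wrappers:
 *
 *	if (nfp_net_tx_ring_alloc(dp, tx_ring))
 *		return -ENOMEM;
 *	...
 *	nfp_net_tx_ring_free(dp, tx_ring);
 */
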
extern const struct nfp_dp_ops nfp_nfd3_ops;
extern const struct nfp_dp_ops nfp_nfdk_ops;

netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);

#endif /* _NFP_NET_DP_ */