1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /* Copyright (c) 2021, Microsoft Corporation. */
8 #include "hw_channel.h"
10 /* Microsoft Azure Network Adapter (MANA)'s definitions
12 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
13 * them are naturally aligned and hence don't need __packed.
16 /* MANA protocol version (major.minor.micro) */
17 #define MANA_MAJOR_VERSION 0
18 #define MANA_MINOR_VERSION 1
19 #define MANA_MICRO_VERSION 1
21 typedef u64 mana_handle_t; /* Opaque handle to a HW-created object (vPort, WQ object, Rx object) */
22 #define INVALID_MANA_HANDLE ((mana_handle_t)-1) /* All-ones sentinel: no valid object */
25 TRI_STATE_UNKNOWN = -1,
30 /* Number of entries for hardware indirection table must be a power of 2 */
31 #define MANA_INDIRECT_TABLE_SIZE 64
32 #define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
34 /* The Toeplitz hash key's length in bytes: should be multiple of 8 */
35 #define MANA_HASH_KEY_SIZE 40
37 #define COMP_ENTRY_SIZE 64 /* Size in bytes of one completion queue entry */
39 #define ADAPTER_MTU_SIZE 1500 /* Default Ethernet MTU */
40 #define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14) /* +14 bytes for the Ethernet header */
42 #define RX_BUFFERS_PER_QUEUE 512 /* Receive buffers allocated per RQ */
44 #define MAX_SEND_BUFFERS_PER_QUEUE 256 /* Upper bound of in-flight TX buffers per SQ */
46 #define EQ_SIZE (8 * PAGE_SIZE) /* Event queue size in bytes */
47 #define LOG2_EQ_THROTTLE 3 /* log2 of the EQ interrupt throttle value */
49 #define MAX_PORTS_IN_MANA_DEV 256 /* Max vPorts (net devices) per MANA device */
54 struct u64_stats_sync syncp;
58 struct gdma_queue *gdma_sq;
71 struct net_device *ndev;
73 /* The SKBs are sent to the HW and we are waiting for the CQEs. */
74 struct sk_buff_head pending_skbs;
75 struct netdev_queue *net_txq;
77 atomic_t pending_sends;
79 struct mana_stats stats;
82 /* skb data and frags dma mappings */
83 struct mana_skb_head {
84 dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];
86 u32 size[MAX_SKB_FRAGS + 1];
89 #define MANA_HEADROOM sizeof(struct mana_skb_head)
91 enum mana_tx_pkt_format {
92 MANA_SHORT_PKT_FMT = 0,
93 MANA_LONG_PKT_FMT = 1,
96 struct mana_tx_short_oob {
98 u32 is_outer_ipv4 : 1;
99 u32 is_outer_ipv6 : 1;
100 u32 comp_iphdr_csum : 1;
101 u32 comp_tcp_csum : 1;
102 u32 comp_udp_csum : 1;
103 u32 supress_txcqe_gen : 1;
106 u32 trans_off : 10; /* Transport header offset */
108 u32 short_vp_offset : 8;
111 struct mana_tx_long_oob {
113 u32 inner_is_ipv6 : 1;
114 u32 inner_tcp_opt : 1;
115 u32 inject_vlan_pri_tag : 1;
117 u32 pcp : 3; /* 802.1Q */
118 u32 dei : 1; /* 802.1Q */
119 u32 vlan_id : 12; /* 802.1Q */
121 u32 inner_frame_offset : 10;
122 u32 inner_ip_rel_offset : 6;
123 u32 long_vp_offset : 12;
131 struct mana_tx_short_oob s_oob;
132 struct mana_tx_long_oob l_oob;
143 CQE_RX_COALESCED_4 = 2,
144 CQE_RX_OBJECT_FENCE = 3,
145 CQE_RX_TRUNCATED = 4,
149 CQE_TX_MTU_DROP = 34,
150 CQE_TX_INVALID_OOB = 35,
151 CQE_TX_INVALID_ETH_TYPE = 36,
152 CQE_TX_HDR_PROCESSING_ERROR = 37,
153 CQE_TX_VF_DISABLED = 38,
154 CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
155 CQE_TX_VPORT_DISABLED = 40,
156 CQE_TX_VLAN_TAGGING_VIOLATION = 41,
159 #define MANA_CQE_COMPLETION 1
161 struct mana_cqe_header {
167 /* NDIS HASH Types: RSS hash-type bits, one per protocol combination */
168 #define NDIS_HASH_IPV4 BIT(0)
169 #define NDIS_HASH_TCP_IPV4 BIT(1)
170 #define NDIS_HASH_UDP_IPV4 BIT(2)
171 #define NDIS_HASH_IPV6 BIT(3)
172 #define NDIS_HASH_TCP_IPV6 BIT(4)
173 #define NDIS_HASH_UDP_IPV6 BIT(5)
174 #define NDIS_HASH_IPV6_EX BIT(6)
175 #define NDIS_HASH_TCP_IPV6_EX BIT(7)
176 #define NDIS_HASH_UDP_IPV6_EX BIT(8)
/* Hash types computed over L3 headers only (IP addresses) */
178 #define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
/* Hash types that also include L4 (TCP/UDP port) fields */
179 #define MANA_HASH_L4 \
180 (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
181 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
183 struct mana_rxcomp_perpkt_info {
190 #define MANA_RXCOMP_OOB_NUM_PPI 4
192 /* Receive completion OOB */
193 struct mana_rxcomp_oob {
194 struct mana_cqe_header cqe_hdr;
197 u32 rx_vlantag_present : 1;
198 u32 rx_outer_iphdr_csum_succeed : 1;
199 u32 rx_outer_iphdr_csum_fail : 1;
202 u32 rx_iphdr_csum_succeed : 1;
203 u32 rx_iphdr_csum_fail : 1;
204 u32 rx_tcp_csum_succeed : 1;
205 u32 rx_tcp_csum_fail : 1;
206 u32 rx_udp_csum_succeed : 1;
207 u32 rx_udp_csum_fail : 1;
210 struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];
215 struct mana_tx_comp_oob {
216 struct mana_cqe_header cqe_hdr;
220 u32 tx_sgl_offset : 5;
221 u32 tx_wqe_offset : 27;
228 #define CQE_POLLING_BUFFER 512
231 struct gdma_queue *gdma_cq;
233 /* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
236 /* Type of the CQ: TX or RX */
237 enum mana_cq_type type;
239 /* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
240 * Only and must be non-NULL if type is MANA_CQ_TYPE_RX.
242 struct mana_rxq *rxq;
244 /* Pointer to the mana_txq that is pushing TX CQEs to the queue.
245 * Only and must be non-NULL if type is MANA_CQ_TYPE_TX.
247 struct mana_txq *txq;
249 /* Buffer which the CQ handler can copy the CQE's into. */
250 struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];
253 struct napi_struct napi;
258 #define GDMA_MAX_RQE_SGES 15
260 struct mana_recv_buf_oob {
261 /* A valid GDMA work request representing the data buffer. */
262 struct gdma_wqe_request wqe_req;
265 dma_addr_t buf_dma_addr;
267 /* SGL of the buffer going to be sent as part of the work request. */
269 struct gdma_sge sgl[GDMA_MAX_RQE_SGES];
271 /* Required to store the result of mana_gd_post_work_request.
272 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
273 * work queue when the WQE is consumed.
275 struct gdma_posted_wqe_info wqe_inf;
279 struct gdma_queue *gdma_rq;
280 /* Cache the gdma receive queue id */
283 /* Index of RQ in the vPort, not gdma receive queue id */
290 struct mana_cq rx_cq;
292 struct completion fence_event;
294 struct net_device *ndev;
296 /* Total number of receive buffers to be allocated */
301 struct mana_stats stats;
303 struct bpf_prog __rcu *bpf_prog;
304 struct xdp_rxq_info xdp_rxq;
306 /* MUST BE THE LAST MEMBER:
307 * Each receive buffer has an associated mana_recv_buf_oob.
309 struct mana_recv_buf_oob rx_oobs[];
315 struct mana_cq tx_cq;
317 mana_handle_t tx_object;
320 struct mana_ethtool_stats {
325 struct mana_context {
326 struct gdma_dev *gdma_dev;
332 struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
335 struct mana_port_context {
336 struct mana_context *ac;
337 struct net_device *ndev;
339 u8 mac_addr[ETH_ALEN];
341 enum TRI_STATE rss_state;
343 mana_handle_t default_rxobj;
344 bool tx_shortform_allowed;
347 struct mana_tx_qp *tx_qp;
349 /* Indirection Table for RX & TX. The values are queue indexes */
350 u32 indir_table[MANA_INDIRECT_TABLE_SIZE];
352 /* Indirection table containing RxObject Handles */
353 mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];
355 /* Hash key used by the NIC */
356 u8 hashkey[MANA_HASH_KEY_SIZE];
358 /* This points to an array of num_queues of RQ pointers. */
359 struct mana_rxq **rxqs;
361 struct bpf_prog *bpf_prog;
363 /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
364 unsigned int max_queues;
365 unsigned int num_queues;
367 mana_handle_t port_handle;
372 bool port_st_save; /* Saved port state */
374 struct mana_ethtool_stats eth_stats;
/* Driver entry points; port-context parameters are consistently named "apc" */
377 int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
378 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
379 bool update_hash, bool update_tab);
381 int mana_alloc_queues(struct net_device *ndev);
382 int mana_attach(struct net_device *ndev);
383 int mana_detach(struct net_device *ndev, bool from_close);
385 int mana_probe(struct gdma_dev *gd, bool resuming);
386 void mana_remove(struct gdma_dev *gd, bool suspending);
388 void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
389 u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
390 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
391 struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
392 void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
393 int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
395 extern const struct ethtool_ops mana_ethtool_ops;
397 struct mana_obj_spec {
405 enum mana_command_code {
406 MANA_QUERY_DEV_CONFIG = 0x20001,
407 MANA_QUERY_GF_STAT = 0x20002,
408 MANA_CONFIG_VPORT_TX = 0x20003,
409 MANA_CREATE_WQ_OBJ = 0x20004,
410 MANA_DESTROY_WQ_OBJ = 0x20005,
411 MANA_FENCE_RQ = 0x20006,
412 MANA_CONFIG_VPORT_RX = 0x20007,
413 MANA_QUERY_VPORT_CONFIG = 0x20008,
416 /* Query Device Configuration */
417 struct mana_query_device_cfg_req {
418 struct gdma_req_hdr hdr;
420 /* MANA Nic Driver Capability flags */
421 u64 mn_drv_cap_flags1;
422 u64 mn_drv_cap_flags2;
423 u64 mn_drv_cap_flags3;
424 u64 mn_drv_cap_flags4;
433 struct mana_query_device_cfg_resp {
434 struct gdma_resp_hdr hdr;
446 /* Query vPort Configuration */
447 struct mana_query_vport_cfg_req {
448 struct gdma_req_hdr hdr;
452 struct mana_query_vport_cfg_resp {
453 struct gdma_resp_hdr hdr;
456 u32 num_indirection_ent;
463 /* Configure vPort */
464 struct mana_config_vport_req {
465 struct gdma_req_hdr hdr;
471 struct mana_config_vport_resp {
472 struct gdma_resp_hdr hdr;
474 u8 short_form_allowed;
478 /* Create WQ Object */
479 struct mana_create_wqobj_req {
480 struct gdma_req_hdr hdr;
488 u32 cq_moderation_ctx_id;
492 struct mana_create_wqobj_resp {
493 struct gdma_resp_hdr hdr;
496 mana_handle_t wq_obj;
499 /* Destroy WQ Object */
500 struct mana_destroy_wqobj_req {
501 struct gdma_req_hdr hdr;
504 mana_handle_t wq_obj_handle;
507 struct mana_destroy_wqobj_resp {
508 struct gdma_resp_hdr hdr;
512 struct mana_fence_rq_req {
513 struct gdma_req_hdr hdr;
514 mana_handle_t wq_obj_handle;
517 struct mana_fence_rq_resp {
518 struct gdma_resp_hdr hdr;
521 /* Configure vPort Rx Steering */
522 struct mana_cfg_rx_steer_req {
523 struct gdma_req_hdr hdr;
525 u16 num_indir_entries;
526 u16 indir_tab_offset;
529 u8 update_default_rxobj;
533 mana_handle_t default_rxobj;
534 u8 hashkey[MANA_HASH_KEY_SIZE];
537 struct mana_cfg_rx_steer_resp {
538 struct gdma_resp_hdr hdr;
541 #define MANA_MAX_NUM_QUEUES 64 /* Upper bound on queues per port */
543 #define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) /* short_vp_offset is an 8-bit field */
545 struct mana_tx_package {
546 struct gdma_wqe_request wqe_req;
547 struct gdma_sge sgl_array[5];
548 struct gdma_sge *sgl_ptr;
550 struct mana_tx_oob tx_oob;
552 struct gdma_posted_wqe_info wqe_info;