1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
4 #include <uapi/linux/bpf.h>
6 #include <linux/inetdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
11 #include <net/checksum.h>
12 #include <net/ip6_checksum.h>
16 /* Microsoft Azure Network Adapter (MANA) functions */
18 static int mana_open(struct net_device *ndev)
20 struct mana_port_context *apc = netdev_priv(ndev);
23 err = mana_alloc_queues(ndev);
27 apc->port_is_up = true;
29 /* Ensure port state updated before txq state */
32 netif_carrier_on(ndev);
33 netif_tx_wake_all_queues(ndev);
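/*
 * Note on the ordering comment above: in the upstream driver it is followed
 * by an smp_wmb() (elided from this listing). The store to apc->port_is_up
 * must be visible before the carrier/txq state changes, both here and in
 * mana_detach(); it pairs with the smp_rmb() in mana_poll_tx_cq(), which
 * samples netif_tx_queue_stopped() before re-checking apc->port_is_up, so
 * the completion path can never wake a queue that mana_detach() has just
 * disabled.
 */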
38 static int mana_close(struct net_device *ndev)
40 struct mana_port_context *apc = netdev_priv(ndev);
45 return mana_detach(ndev, true);
48 static bool mana_can_tx(struct gdma_queue *wq)
50 return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
53 static unsigned int mana_checksum_info(struct sk_buff *skb)
55 if (skb->protocol == htons(ETH_P_IP)) {
56 struct iphdr *ip = ip_hdr(skb);
58 if (ip->protocol == IPPROTO_TCP)
61 if (ip->protocol == IPPROTO_UDP)
63 } else if (skb->protocol == htons(ETH_P_IPV6)) {
64 struct ipv6hdr *ip6 = ipv6_hdr(skb);
66 if (ip6->nexthdr == IPPROTO_TCP)
69 if (ip6->nexthdr == IPPROTO_UDP)
73 /* No csum offloading */
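/*
 * The return statements are elided from this listing; in the upstream driver
 * mana_checksum_info() returns IPPROTO_TCP or IPPROTO_UDP when the packet is
 * offloadable and 0 ("no csum offloading") otherwise. mana_start_xmit() uses
 * that value to choose between comp_tcp_csum and comp_udp_csum, or to fall
 * back to skb_checksum_help().
 */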
77 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
78 struct mana_tx_package *tp)
80 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
81 struct gdma_dev *gd = apc->ac->gdma_dev;
82 struct gdma_context *gc;
88 gc = gd->gdma_context;
90 da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
92 if (dma_mapping_error(dev, da))
95 ash->dma_handle[0] = da;
96 ash->size[0] = skb_headlen(skb);
98 tp->wqe_req.sgl[0].address = ash->dma_handle[0];
99 tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
100 tp->wqe_req.sgl[0].size = ash->size[0];
102 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
103 frag = &skb_shinfo(skb)->frags[i];
104 da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
107 if (dma_mapping_error(dev, da))
110 ash->dma_handle[i + 1] = da;
111 ash->size[i + 1] = skb_frag_size(frag);
113 tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
114 tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
115 tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
121 for (i = i - 1; i >= 0; i--)
122 dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
125 dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
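/*
 * DMA mapping layout used by mana_map_skb()/mana_unmap_skb(): SGE 0 always
 * covers the linear area (skb_headlen), SGEs 1..nr_frags cover the page
 * fragments. The handles and sizes are stashed in the mana_skb_head stored
 * in skb->head so mana_unmap_skb() can undo the mappings at completion time.
 * The code above is the error unwind: already-mapped fragments are unmapped
 * in reverse order, then the linear mapping, before the (elided) -ENOMEM
 * return.
 */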
130 int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
132 enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
133 struct mana_port_context *apc = netdev_priv(ndev);
134 u16 txq_idx = skb_get_queue_mapping(skb);
135 struct gdma_dev *gd = apc->ac->gdma_dev;
136 bool ipv4 = false, ipv6 = false;
137 struct mana_tx_package pkg = {};
138 struct netdev_queue *net_txq;
139 struct mana_stats *tx_stats;
140 struct gdma_queue *gdma_sq;
141 unsigned int csum_type;
142 struct mana_txq *txq;
146 if (unlikely(!apc->port_is_up))
149 if (skb_cow_head(skb, MANA_HEADROOM))
152 txq = &apc->tx_qp[txq_idx].txq;
153 gdma_sq = txq->gdma_sq;
154 cq = &apc->tx_qp[txq_idx].tx_cq;
156 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
157 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
159 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
160 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
161 pkt_fmt = MANA_LONG_PKT_FMT;
163 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
166 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
168 if (pkt_fmt == MANA_SHORT_PKT_FMT)
169 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
171 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
173 pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
174 pkg.wqe_req.flags = 0;
175 pkg.wqe_req.client_data_unit = 0;
177 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
178 WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
180 if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
181 pkg.wqe_req.sgl = pkg.sgl_array;
183 pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
184 sizeof(struct gdma_sge),
189 pkg.wqe_req.sgl = pkg.sgl_ptr;
192 if (skb->protocol == htons(ETH_P_IP))
194 else if (skb->protocol == htons(ETH_P_IPV6))
197 if (skb_is_gso(skb)) {
198 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
199 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
201 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
202 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
203 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
205 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
206 pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
208 ip_hdr(skb)->tot_len = 0;
209 ip_hdr(skb)->check = 0;
210 tcp_hdr(skb)->check =
211 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
212 ip_hdr(skb)->daddr, 0,
215 ipv6_hdr(skb)->payload_len = 0;
216 tcp_hdr(skb)->check =
217 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
218 &ipv6_hdr(skb)->daddr, 0,
221 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
222 csum_type = mana_checksum_info(skb);
224 if (csum_type == IPPROTO_TCP) {
225 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
226 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
228 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
229 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
231 } else if (csum_type == IPPROTO_UDP) {
232 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
233 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
235 pkg.tx_oob.s_oob.comp_udp_csum = 1;
237 /* Can't do offload of this type of checksum */
238 if (skb_checksum_help(skb))
243 if (mana_map_skb(skb, apc, &pkg))
246 skb_queue_tail(&txq->pending_skbs, skb);
249 net_txq = netdev_get_tx_queue(ndev, txq_idx);
251 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
252 (struct gdma_posted_wqe_info *)skb->cb);
253 if (!mana_can_tx(gdma_sq)) {
254 netif_tx_stop_queue(net_txq);
255 apc->eth_stats.stop_queue++;
259 (void)skb_dequeue_tail(&txq->pending_skbs);
260 netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
261 err = NETDEV_TX_BUSY;
266 atomic_inc(&txq->pending_sends);
268 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
270 /* The skb may already have been freed by the completion path after mana_gd_post_work_request(); do not touch it from here on. */

273 tx_stats = &txq->stats;
274 u64_stats_update_begin(&tx_stats->syncp);
276 tx_stats->bytes += len;
277 u64_stats_update_end(&tx_stats->syncp);
280 if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
281 netif_tx_wake_queue(net_txq);
282 apc->eth_stats.wake_queue++;
291 ndev->stats.tx_dropped++;
293 dev_kfree_skb_any(skb);
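/*
 * TX bookkeeping in mana_start_xmit(): each posted skb is appended to
 * txq->pending_skbs and counted in txq->pending_sends before the doorbell
 * is rung; mana_poll_tx_cq() dequeues, unmaps and frees them in completion
 * order and decrements pending_sends, which mana_dealloc_queues() polls to
 * drain in-flight packets. On a failed post the skb is pulled back off
 * pending_skbs and NETDEV_TX_BUSY is returned so the stack requeues it.
 */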
297 static void mana_get_stats64(struct net_device *ndev,
298 struct rtnl_link_stats64 *st)
300 struct mana_port_context *apc = netdev_priv(ndev);
301 unsigned int num_queues = apc->num_queues;
302 struct mana_stats *stats;
307 if (!apc->port_is_up)
310 netdev_stats_to_stats64(st, &ndev->stats);
312 for (q = 0; q < num_queues; q++) {
313 stats = &apc->rxqs[q]->stats;
316 start = u64_stats_fetch_begin_irq(&stats->syncp);
317 packets = stats->packets;
318 bytes = stats->bytes;
319 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
321 st->rx_packets += packets;
322 st->rx_bytes += bytes;
325 for (q = 0; q < num_queues; q++) {
326 stats = &apc->tx_qp[q].txq.stats;
329 start = u64_stats_fetch_begin_irq(&stats->syncp);
330 packets = stats->packets;
331 bytes = stats->bytes;
332 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
334 st->tx_packets += packets;
335 st->tx_bytes += bytes;
339 static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
342 struct mana_port_context *apc = netdev_priv(ndev);
343 u32 hash = skb_get_hash(skb);
344 struct sock *sk = skb->sk;
347 txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
349 if (txq != old_q && sk && sk_fullsock(sk) &&
350 rcu_access_pointer(sk->sk_dst_cache))
351 sk_tx_queue_set(sk, txq);
356 static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
357 struct net_device *sb_dev)
361 if (ndev->real_num_tx_queues == 1)
364 txq = sk_tx_queue_get(skb->sk);
366 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
367 if (skb_rx_queue_recorded(skb))
368 txq = skb_get_rx_queue(skb);
370 txq = mana_get_tx_queue(ndev, skb, txq);
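/*
 * Queue selection: the skb hash indexes apc->indir_table (masked with
 * MANA_INDIRECT_TABLE_MASK), mirroring the RX RSS indirection, and the
 * result is cached with sk_tx_queue_set() for full sockets that have a
 * cached dst, so later packets on the same connection skip the lookup.
 */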
376 static const struct net_device_ops mana_devops = {
377 .ndo_open = mana_open,
378 .ndo_stop = mana_close,
379 .ndo_select_queue = mana_select_queue,
380 .ndo_start_xmit = mana_start_xmit,
381 .ndo_validate_addr = eth_validate_addr,
382 .ndo_get_stats64 = mana_get_stats64,
386 static void mana_cleanup_port_context(struct mana_port_context *apc)
392 static int mana_init_port_context(struct mana_port_context *apc)
394 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
397 return !apc->rxqs ? -ENOMEM : 0;
400 static int mana_send_request(struct mana_context *ac, void *in_buf,
401 u32 in_len, void *out_buf, u32 out_len)
403 struct gdma_context *gc = ac->gdma_dev->gdma_context;
404 struct gdma_resp_hdr *resp = out_buf;
405 struct gdma_req_hdr *req = in_buf;
406 struct device *dev = gc->dev;
407 static atomic_t activity_id;
410 req->dev_id = gc->mana.dev_id;
411 req->activity_id = atomic_inc_return(&activity_id);
413 err = mana_gd_send_request(gc, in_len, in_buf, out_len,
415 if (err || resp->status) {
416 dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
418 return err ? err : -EPROTO;
421 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
422 req->activity_id != resp->activity_id) {
423 dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
424 req->dev_id.as_uint32, resp->dev_id.as_uint32,
425 req->activity_id, resp->activity_id);
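/*
 * Each request carries a unique activity_id taken from a static atomic
 * counter; the response must echo both the dev_id and the activity_id,
 * which is what the check above enforces before the (elided) -EPROTO
 * return in the upstream driver.
 */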
432 static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
433 const enum mana_command_code expected_code,
436 if (resp_hdr->response.msg_type != expected_code)
439 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
442 if (resp_hdr->response.msg_size < min_size)
448 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
449 u32 proto_minor_ver, u32 proto_micro_ver,
452 struct gdma_context *gc = ac->gdma_dev->gdma_context;
453 struct mana_query_device_cfg_resp resp = {};
454 struct mana_query_device_cfg_req req = {};
455 struct device *dev = gc->dev;
458 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
459 sizeof(req), sizeof(resp));
460 req.proto_major_ver = proto_major_ver;
461 req.proto_minor_ver = proto_minor_ver;
462 req.proto_micro_ver = proto_micro_ver;
464 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
466 dev_err(dev, "Failed to query config: %d\n", err);
470 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
472 if (err || resp.hdr.status) {
473 dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
480 *max_num_vports = resp.max_num_vports;
485 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
486 u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
488 struct mana_query_vport_cfg_resp resp = {};
489 struct mana_query_vport_cfg_req req = {};
492 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
493 sizeof(req), sizeof(resp));
495 req.vport_index = vport_index;
497 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
502 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
510 *max_sq = resp.max_num_sq;
511 *max_rq = resp.max_num_rq;
512 *num_indir_entry = resp.num_indirection_ent;
514 apc->port_handle = resp.vport;
515 ether_addr_copy(apc->mac_addr, resp.mac_addr);
520 static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
523 struct mana_config_vport_resp resp = {};
524 struct mana_config_vport_req req = {};
527 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
528 sizeof(req), sizeof(resp));
529 req.vport = apc->port_handle;
530 req.pdid = protection_dom_id;
531 req.doorbell_pageid = doorbell_pg_id;
533 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
536 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
540 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
542 if (err || resp.hdr.status) {
543 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
544 err, resp.hdr.status);
551 apc->tx_shortform_allowed = resp.short_form_allowed;
552 apc->tx_vp_offset = resp.tx_vport_offset;
557 static int mana_cfg_vport_steering(struct mana_port_context *apc,
559 bool update_default_rxobj, bool update_key,
562 u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
563 struct mana_cfg_rx_steer_req *req = NULL;
564 struct mana_cfg_rx_steer_resp resp = {};
565 struct net_device *ndev = apc->ndev;
566 mana_handle_t *req_indir_tab;
570 req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
571 req = kzalloc(req_buf_size, GFP_KERNEL);
575 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
578 req->vport = apc->port_handle;
579 req->num_indir_entries = num_entries;
580 req->indir_tab_offset = sizeof(*req);
582 req->rss_enable = apc->rss_state;
583 req->update_default_rxobj = update_default_rxobj;
584 req->update_hashkey = update_key;
585 req->update_indir_tab = update_tab;
586 req->default_rxobj = apc->default_rxobj;
589 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
592 req_indir_tab = (mana_handle_t *)(req + 1);
593 memcpy(req_indir_tab, apc->rxobj_table,
594 req->num_indir_entries * sizeof(mana_handle_t));
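/*
 * Request layout for MANA_CONFIG_VPORT_RX: the fixed mana_cfg_rx_steer_req
 * header is immediately followed by the RSS indirection table, hence the
 * "req + 1" cursor here and the req_buf_size of sizeof(*req) plus one
 * mana_handle_t per indirection entry computed above.
 */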
597 err = mana_send_request(apc->ac, req, req_buf_size, &resp,
600 netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
604 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
607 netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
611 if (resp.hdr.status) {
612 netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
621 static int mana_create_wq_obj(struct mana_port_context *apc,
623 u32 wq_type, struct mana_obj_spec *wq_spec,
624 struct mana_obj_spec *cq_spec,
625 mana_handle_t *wq_obj)
627 struct mana_create_wqobj_resp resp = {};
628 struct mana_create_wqobj_req req = {};
629 struct net_device *ndev = apc->ndev;
632 mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
633 sizeof(req), sizeof(resp));
635 req.wq_type = wq_type;
636 req.wq_gdma_region = wq_spec->gdma_region;
637 req.cq_gdma_region = cq_spec->gdma_region;
638 req.wq_size = wq_spec->queue_size;
639 req.cq_size = cq_spec->queue_size;
640 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
641 req.cq_parent_qid = cq_spec->attached_eq;
643 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
646 netdev_err(ndev, "Failed to create WQ object: %d\n", err);
650 err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
652 if (err || resp.hdr.status) {
653 netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
660 if (resp.wq_obj == INVALID_MANA_HANDLE) {
661 netdev_err(ndev, "Got an invalid WQ object handle\n");
666 *wq_obj = resp.wq_obj;
667 wq_spec->queue_index = resp.wq_id;
668 cq_spec->queue_index = resp.cq_id;
675 static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
676 mana_handle_t wq_obj)
678 struct mana_destroy_wqobj_resp resp = {};
679 struct mana_destroy_wqobj_req req = {};
680 struct net_device *ndev = apc->ndev;
683 mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
684 sizeof(req), sizeof(resp));
685 req.wq_type = wq_type;
686 req.wq_obj_handle = wq_obj;
688 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
691 netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
695 err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
697 if (err || resp.hdr.status)
698 netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
702 static void mana_destroy_eq(struct mana_context *ac)
704 struct gdma_context *gc = ac->gdma_dev->gdma_context;
705 struct gdma_queue *eq;
711 for (i = 0; i < gc->max_num_queues; i++) {
716 mana_gd_destroy_queue(gc, eq);
723 static int mana_create_eq(struct mana_context *ac)
725 struct gdma_dev *gd = ac->gdma_dev;
726 struct gdma_context *gc = gd->gdma_context;
727 struct gdma_queue_spec spec = {};
731 ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
737 spec.monitor_avl_buf = false;
738 spec.queue_size = EQ_SIZE;
739 spec.eq.callback = NULL;
740 spec.eq.context = ac->eqs;
741 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
743 for (i = 0; i < gc->max_num_queues; i++) {
744 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
755 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
757 struct mana_fence_rq_resp resp = {};
758 struct mana_fence_rq_req req = {};
761 init_completion(&rxq->fence_event);
763 mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
764 sizeof(req), sizeof(resp));
765 req.wq_obj_handle = rxq->rxobj;
767 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
770 netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
775 err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
776 if (err || resp.hdr.status) {
777 netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
778 rxq->rxq_idx, err, resp.hdr.status);
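/*
 * Fencing handshake: once the MANA_FENCE_RQ request is accepted, the device
 * posts a CQE_RX_OBJECT_FENCE completion on the RQ's CQ after all in-flight
 * RX work has drained; mana_process_rx_cqe() completes rxq->fence_event for
 * that CQE, which is what the 10 second wait below blocks on.
 */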
785 if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
786 netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
794 static void mana_fence_rqs(struct mana_port_context *apc)
796 unsigned int rxq_idx;
797 struct mana_rxq *rxq;
800 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
801 rxq = apc->rxqs[rxq_idx];
802 err = mana_fence_rq(apc, rxq);
804 /* In case of any error, fall back to sleeping to let in-flight packets drain. */
810 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
815 used_space_old = wq->head - wq->tail;
816 used_space_new = wq->head - (wq->tail + num_units);
818 if (WARN_ON_ONCE(used_space_new > used_space_old))
821 wq->tail += num_units;
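/*
 * WQ head and tail are free-running counters in work-queue basic units (as
 * counted by wqe_size_in_bu), so used space is simply head - tail. Advancing
 * the tail by a completed WQE's size releases exactly that much space, and
 * the WARN above fires if a completion would release more than is in use.
 */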
825 static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
827 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
828 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
829 struct device *dev = gc->dev;
832 dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
834 for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
835 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
839 static void mana_poll_tx_cq(struct mana_cq *cq)
841 struct gdma_comp *completions = cq->gdma_comp_buf;
842 struct gdma_posted_wqe_info *wqe_info;
843 unsigned int pkt_transmitted = 0;
844 unsigned int wqe_unit_cnt = 0;
845 struct mana_txq *txq = cq->txq;
846 struct mana_port_context *apc;
847 struct netdev_queue *net_txq;
848 struct gdma_queue *gdma_wq;
849 unsigned int avail_space;
850 struct net_device *ndev;
857 apc = netdev_priv(ndev);
859 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
865 for (i = 0; i < comp_read; i++) {
866 struct mana_tx_comp_oob *cqe_oob;
868 if (WARN_ON_ONCE(!completions[i].is_sq))
871 cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
872 if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
873 MANA_CQE_COMPLETION))
876 switch (cqe_oob->cqe_hdr.cqe_type) {
881 case CQE_TX_MTU_DROP:
882 case CQE_TX_INVALID_OOB:
883 case CQE_TX_INVALID_ETH_TYPE:
884 case CQE_TX_HDR_PROCESSING_ERROR:
885 case CQE_TX_VF_DISABLED:
886 case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
887 case CQE_TX_VPORT_DISABLED:
888 case CQE_TX_VLAN_TAGGING_VIOLATION:
889 WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
890 cqe_oob->cqe_hdr.cqe_type);
894 /* If the CQE type is unexpected, log an error, assert,
895 * and go through the error path.
897 WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
898 cqe_oob->cqe_hdr.cqe_type);
902 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
905 skb = skb_dequeue(&txq->pending_skbs);
906 if (WARN_ON_ONCE(!skb))
909 wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
910 wqe_unit_cnt += wqe_info->wqe_size_in_bu;
912 mana_unmap_skb(skb, apc);
914 napi_consume_skb(skb, cq->budget);
919 if (WARN_ON_ONCE(wqe_unit_cnt == 0))
922 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
924 gdma_wq = txq->gdma_sq;
925 avail_space = mana_gd_wq_avail_space(gdma_wq);
927 /* Ensure tail updated before checking q stop */
930 net_txq = txq->net_txq;
931 txq_stopped = netif_tx_queue_stopped(net_txq);
933 /* Ensure checking txq_stopped before apc->port_is_up. */
936 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
937 netif_tx_wake_queue(net_txq);
938 apc->eth_stats.wake_queue++;
941 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
944 cq->work_done = pkt_transmitted;
947 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
949 struct mana_recv_buf_oob *recv_buf_oob;
953 curr_index = rxq->buf_index++;
954 if (rxq->buf_index == rxq->num_rx_buf)
957 recv_buf_oob = &rxq->rx_oobs[curr_index];
959 err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
960 &recv_buf_oob->wqe_inf);
961 if (WARN_ON_ONCE(err))
964 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
967 static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
968 struct xdp_buff *xdp)
970 struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
975 if (xdp->data_hard_start) {
976 skb_reserve(skb, xdp->data - xdp->data_hard_start);
977 skb_put(skb, xdp->data_end - xdp->data);
979 skb_reserve(skb, XDP_PACKET_HEADROOM);
980 skb_put(skb, pkt_len);
986 static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
987 struct mana_rxq *rxq)
989 struct mana_stats *rx_stats = &rxq->stats;
990 struct net_device *ndev = rxq->ndev;
991 uint pkt_len = cqe->ppi[0].pkt_len;
992 u16 rxq_idx = rxq->rxq_idx;
993 struct napi_struct *napi;
994 struct xdp_buff xdp = {};
999 rxq->rx_cq.work_done++;
1000 napi = &rxq->rx_cq.napi;
1003 ++ndev->stats.rx_dropped;
1007 act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1009 if (act != XDP_PASS && act != XDP_TX)
1012 skb = mana_build_skb(buf_va, pkt_len, &xdp);
1017 skb->dev = napi->dev;
1019 skb->protocol = eth_type_trans(skb, ndev);
1020 skb_checksum_none_assert(skb);
1021 skb_record_rx_queue(skb, rxq_idx);
1023 if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1024 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1025 skb->ip_summed = CHECKSUM_UNNECESSARY;
1028 if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1029 hash_value = cqe->ppi[0].pkt_hash;
1031 if (cqe->rx_hashtype & MANA_HASH_L4)
1032 skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1034 skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1037 if (act == XDP_TX) {
1038 skb_set_queue_mapping(skb, rxq_idx);
1039 mana_xdp_tx(skb, ndev);
1043 napi_gro_receive(napi, skb);
1045 u64_stats_update_begin(&rx_stats->syncp);
1046 rx_stats->packets++;
1047 rx_stats->bytes += pkt_len;
1048 u64_stats_update_end(&rx_stats->syncp);
1052 free_page((unsigned long)buf_va);
1053 ++ndev->stats.rx_dropped;
1057 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1058 struct gdma_comp *cqe)
1060 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1061 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1062 struct net_device *ndev = rxq->ndev;
1063 struct mana_recv_buf_oob *rxbuf_oob;
1064 struct device *dev = gc->dev;
1065 void *new_buf, *old_buf;
1066 struct page *new_page;
1070 switch (oob->cqe_hdr.cqe_type) {
1074 case CQE_RX_TRUNCATED:
1075 netdev_err(ndev, "Dropped a truncated packet\n");
1078 case CQE_RX_COALESCED_4:
1079 netdev_err(ndev, "RX coalescing is unsupported\n");
1082 case CQE_RX_OBJECT_FENCE:
1083 complete(&rxq->fence_event);
1087 netdev_err(ndev, "Unknown RX CQE type = %d\n",
1088 oob->cqe_hdr.cqe_type);
1092 if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1095 pktlen = oob->ppi[0].pkt_len;
1099 /* Data packets should never have a packet length of zero. */
1099 netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1100 rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1104 curr = rxq->buf_index;
1105 rxbuf_oob = &rxq->rx_oobs[curr];
1106 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1108 new_page = alloc_page(GFP_ATOMIC);
1111 da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
1114 if (dma_mapping_error(dev, da)) {
1115 __free_page(new_page);
1120 new_buf = new_page ? page_to_virt(new_page) : NULL;
1123 dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
1126 old_buf = rxbuf_oob->buf_va;
1128 /* refresh the rxbuf_oob with the new page */
1129 rxbuf_oob->buf_va = new_buf;
1130 rxbuf_oob->buf_dma_addr = da;
1131 rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
1133 old_buf = NULL; /* drop the packet if no memory */
1136 mana_rx_skb(old_buf, oob, rxq);
1138 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1140 mana_post_pkt_rxq(rxq);
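/*
 * RX buffer refill strategy: a replacement page is allocated and DMA-mapped
 * before the received buffer is handed up the stack; only if that succeeds
 * is the old buffer detached from rxbuf_oob and passed to mana_rx_skb().
 * Otherwise old_buf stays NULL, mana_rx_skb() only counts the drop, and the
 * original buffer remains in the ring. Either way the WQ tail is advanced
 * and a fresh receive WQE is posted.
 */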
1143 static void mana_poll_rx_cq(struct mana_cq *cq)
1145 struct gdma_comp *comp = cq->gdma_comp_buf;
1148 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1149 WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1151 for (i = 0; i < comp_read; i++) {
1152 if (WARN_ON_ONCE(comp[i].is_sq))
1155 /* verify recv cqe references the right rxq */
1156 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1159 mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1163 static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1165 struct mana_cq *cq = context;
1168 WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1170 if (cq->type == MANA_CQ_TYPE_RX)
1171 mana_poll_rx_cq(cq);
1173 mana_poll_tx_cq(cq);
1175 if (cq->work_done < cq->budget &&
1176 napi_complete_done(&cq->napi, cq->work_done)) {
1177 arm_bit = SET_ARM_BIT;
1182 mana_gd_ring_cq(gdma_queue, arm_bit);
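/*
 * NAPI completion and CQ re-arming: the CQ doorbell is always rung, but the
 * arm bit (SET_ARM_BIT) is set only when the poll consumed less than its
 * budget and napi_complete_done() accepted the completion; otherwise the CQ
 * stays unarmed and NAPI will poll it again on the next round.
 */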
1185 static int mana_poll(struct napi_struct *napi, int budget)
1187 struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1190 cq->budget = budget;
1192 mana_cq_handler(cq, cq->gdma_cq);
1194 return min(cq->work_done, budget);
1197 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1199 struct mana_cq *cq = context;
1201 napi_schedule_irqoff(&cq->napi);
1204 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1206 struct gdma_dev *gd = apc->ac->gdma_dev;
1211 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1214 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1216 struct gdma_dev *gd = apc->ac->gdma_dev;
1221 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1224 static void mana_destroy_txq(struct mana_port_context *apc)
1226 struct napi_struct *napi;
1232 for (i = 0; i < apc->num_queues; i++) {
1233 napi = &apc->tx_qp[i].tx_cq.napi;
1234 napi_synchronize(napi);
1236 netif_napi_del(napi);
1238 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1240 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1242 mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1249 static int mana_create_txq(struct mana_port_context *apc,
1250 struct net_device *net)
1252 struct mana_context *ac = apc->ac;
1253 struct gdma_dev *gd = ac->gdma_dev;
1254 struct mana_obj_spec wq_spec;
1255 struct mana_obj_spec cq_spec;
1256 struct gdma_queue_spec spec;
1257 struct gdma_context *gc;
1258 struct mana_txq *txq;
1265 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1270 /* The minimum size of the WQE is 32 bytes, hence
1271 * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1272 * the SQ can store. This value is then used to size other queues
1273 * to prevent overflow.
1275 txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1276 BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1278 cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1279 cq_size = PAGE_ALIGN(cq_size);
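/*
 * Worked example of the sizing above, assuming the values in mana.h at the
 * time (MAX_SEND_BUFFERS_PER_QUEUE = 256, COMP_ENTRY_SIZE = 64):
 *
 *   txq_size = 256 * 32 = 8192 bytes
 *   cq_size  = 256 * 64 = 16384 bytes
 *
 * Both are multiples of a 4 KiB page, so the BUILD_BUG_ON above holds and
 * the CQ has one completion slot for every WQE the SQ can hold.
 */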
1281 gc = gd->gdma_context;
1283 for (i = 0; i < apc->num_queues; i++) {
1284 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1287 txq = &apc->tx_qp[i].txq;
1289 u64_stats_init(&txq->stats.syncp);
1291 txq->net_txq = netdev_get_tx_queue(net, i);
1292 txq->vp_offset = apc->tx_vp_offset;
1293 skb_queue_head_init(&txq->pending_skbs);
1295 memset(&spec, 0, sizeof(spec));
1296 spec.type = GDMA_SQ;
1297 spec.monitor_avl_buf = true;
1298 spec.queue_size = txq_size;
1299 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1303 /* Create SQ's CQ */
1304 cq = &apc->tx_qp[i].tx_cq;
1305 cq->type = MANA_CQ_TYPE_TX;
1309 memset(&spec, 0, sizeof(spec));
1310 spec.type = GDMA_CQ;
1311 spec.monitor_avl_buf = false;
1312 spec.queue_size = cq_size;
1313 spec.cq.callback = mana_schedule_napi;
1314 spec.cq.parent_eq = ac->eqs[i].eq;
1315 spec.cq.context = cq;
1316 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1320 memset(&wq_spec, 0, sizeof(wq_spec));
1321 memset(&cq_spec, 0, sizeof(cq_spec));
1323 wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1324 wq_spec.queue_size = txq->gdma_sq->queue_size;
1326 cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1327 cq_spec.queue_size = cq->gdma_cq->queue_size;
1328 cq_spec.modr_ctx_id = 0;
1329 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1331 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1333 &apc->tx_qp[i].tx_object);
1338 txq->gdma_sq->id = wq_spec.queue_index;
1339 cq->gdma_cq->id = cq_spec.queue_index;
1341 txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1342 cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1344 txq->gdma_txq_id = txq->gdma_sq->id;
1346 cq->gdma_id = cq->gdma_cq->id;
1348 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1353 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1355 netif_tx_napi_add(net, &cq->napi, mana_poll, NAPI_POLL_WEIGHT);
1356 napi_enable(&cq->napi);
1358 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1363 mana_destroy_txq(apc);
1367 static void mana_destroy_rxq(struct mana_port_context *apc,
1368 struct mana_rxq *rxq, bool validate_state)
1371 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1372 struct mana_recv_buf_oob *rx_oob;
1373 struct device *dev = gc->dev;
1374 struct napi_struct *napi;
1380 napi = &rxq->rx_cq.napi;
1383 napi_synchronize(napi);
1387 xdp_rxq_info_unreg(&rxq->xdp_rxq);
1389 netif_napi_del(napi);
1391 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1393 mana_deinit_cq(apc, &rxq->rx_cq);
1395 for (i = 0; i < rxq->num_rx_buf; i++) {
1396 rx_oob = &rxq->rx_oobs[i];
1398 if (!rx_oob->buf_va)
1401 dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
1404 free_page((unsigned long)rx_oob->buf_va);
1405 rx_oob->buf_va = NULL;
1409 mana_gd_destroy_queue(gc, rxq->gdma_rq);
1414 #define MANA_WQE_HEADER_SIZE 16
1415 #define MANA_WQE_SGE_SIZE 16
1417 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
1418 struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
1420 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1421 struct mana_recv_buf_oob *rx_oob;
1422 struct device *dev = gc->dev;
1427 WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
1432 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1433 rx_oob = &rxq->rx_oobs[buf_idx];
1434 memset(rx_oob, 0, sizeof(*rx_oob));
1436 page = alloc_page(GFP_KERNEL);
1440 da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
1443 if (dma_mapping_error(dev, da)) {
1448 rx_oob->buf_va = page_to_virt(page);
1449 rx_oob->buf_dma_addr = da;
1451 rx_oob->num_sge = 1;
1452 rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
1453 rx_oob->sgl[0].size = rxq->datasize;
1454 rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
1456 rx_oob->wqe_req.sgl = rx_oob->sgl;
1457 rx_oob->wqe_req.num_sge = rx_oob->num_sge;
1458 rx_oob->wqe_req.inline_oob_size = 0;
1459 rx_oob->wqe_req.inline_oob_data = NULL;
1460 rx_oob->wqe_req.flags = 0;
1461 rx_oob->wqe_req.client_data_unit = 0;
1463 *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
1464 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
1465 *cq_size += COMP_ENTRY_SIZE;
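/*
 * Per-buffer sizing example: with one SGE per receive WQE the WQE size is
 * ALIGN(MANA_WQE_HEADER_SIZE + MANA_WQE_SGE_SIZE * 1, 32) = ALIGN(32, 32) =
 * 32 bytes, and each buffer also reserves one COMP_ENTRY_SIZE slot in the
 * CQ, so *rxq_size and *cq_size grow linearly with num_rx_buf.
 */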
1471 static int mana_push_wqe(struct mana_rxq *rxq)
1473 struct mana_recv_buf_oob *rx_oob;
1477 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1478 rx_oob = &rxq->rx_oobs[buf_idx];
1480 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
1489 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
1490 u32 rxq_idx, struct mana_eq *eq,
1491 struct net_device *ndev)
1493 struct gdma_dev *gd = apc->ac->gdma_dev;
1494 struct mana_obj_spec wq_spec;
1495 struct mana_obj_spec cq_spec;
1496 struct gdma_queue_spec spec;
1497 struct mana_cq *cq = NULL;
1498 struct gdma_context *gc;
1499 u32 cq_size, rq_size;
1500 struct mana_rxq *rxq;
1503 gc = gd->gdma_context;
1505 rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
1511 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
1512 rxq->rxq_idx = rxq_idx;
1513 rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
1514 rxq->rxobj = INVALID_MANA_HANDLE;
1516 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
1520 rq_size = PAGE_ALIGN(rq_size);
1521 cq_size = PAGE_ALIGN(cq_size);
1524 memset(&spec, 0, sizeof(spec));
1525 spec.type = GDMA_RQ;
1526 spec.monitor_avl_buf = true;
1527 spec.queue_size = rq_size;
1528 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
1532 /* Create RQ's CQ */
1534 cq->type = MANA_CQ_TYPE_RX;
1537 memset(&spec, 0, sizeof(spec));
1538 spec.type = GDMA_CQ;
1539 spec.monitor_avl_buf = false;
1540 spec.queue_size = cq_size;
1541 spec.cq.callback = mana_schedule_napi;
1542 spec.cq.parent_eq = eq->eq;
1543 spec.cq.context = cq;
1544 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1548 memset(&wq_spec, 0, sizeof(wq_spec));
1549 memset(&cq_spec, 0, sizeof(cq_spec));
1550 wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
1551 wq_spec.queue_size = rxq->gdma_rq->queue_size;
1553 cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1554 cq_spec.queue_size = cq->gdma_cq->queue_size;
1555 cq_spec.modr_ctx_id = 0;
1556 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1558 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
1559 &wq_spec, &cq_spec, &rxq->rxobj);
1563 rxq->gdma_rq->id = wq_spec.queue_index;
1564 cq->gdma_cq->id = cq_spec.queue_index;
1566 rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1567 cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1569 rxq->gdma_id = rxq->gdma_rq->id;
1570 cq->gdma_id = cq->gdma_cq->id;
1572 err = mana_push_wqe(rxq);
1576 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1581 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1583 netif_napi_add(ndev, &cq->napi, mana_poll, 1);
1585 WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
1587 WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
1588 MEM_TYPE_PAGE_SHARED, NULL));
1590 napi_enable(&cq->napi);
1592 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1597 netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
1599 mana_destroy_rxq(apc, rxq, false);
1602 mana_deinit_cq(apc, cq);
1607 static int mana_add_rx_queues(struct mana_port_context *apc,
1608 struct net_device *ndev)
1610 struct mana_context *ac = apc->ac;
1611 struct mana_rxq *rxq;
1615 for (i = 0; i < apc->num_queues; i++) {
1616 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
1622 u64_stats_init(&rxq->stats.syncp);
1627 apc->default_rxobj = apc->rxqs[0]->rxobj;
1632 static void mana_destroy_vport(struct mana_port_context *apc)
1634 struct mana_rxq *rxq;
1637 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1638 rxq = apc->rxqs[rxq_idx];
1642 mana_destroy_rxq(apc, rxq, true);
1643 apc->rxqs[rxq_idx] = NULL;
1646 mana_destroy_txq(apc);
1649 static int mana_create_vport(struct mana_port_context *apc,
1650 struct net_device *net)
1652 struct gdma_dev *gd = apc->ac->gdma_dev;
1655 apc->default_rxobj = INVALID_MANA_HANDLE;
1657 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
1661 return mana_create_txq(apc, net);
1664 static void mana_rss_table_init(struct mana_port_context *apc)
1668 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
1669 apc->indir_table[i] =
1670 ethtool_rxfh_indir_default(i, apc->num_queues);
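/*
 * ethtool_rxfh_indir_default(i, n) spreads entries round-robin (i % n), so
 * with e.g. 4 queues the default indirection table is 0, 1, 2, 3, 0, 1, ...
 * across all MANA_INDIRECT_TABLE_SIZE slots.
 */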
1673 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
1674 bool update_hash, bool update_tab)
1681 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
1682 queue_idx = apc->indir_table[i];
1683 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
1687 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
1691 mana_fence_rqs(apc);
1696 static int mana_init_port(struct net_device *ndev)
1698 struct mana_port_context *apc = netdev_priv(ndev);
1699 u32 max_txq, max_rxq, max_queues;
1700 int port_idx = apc->port_idx;
1701 u32 num_indirect_entries;
1704 err = mana_init_port_context(apc);
1708 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
1709 &num_indirect_entries);
1711 netdev_err(ndev, "Failed to query info for vPort %d\n",
1716 max_queues = min_t(u32, max_txq, max_rxq);
1717 if (apc->max_queues > max_queues)
1718 apc->max_queues = max_queues;
1720 if (apc->num_queues > apc->max_queues)
1721 apc->num_queues = apc->max_queues;
1723 eth_hw_addr_set(ndev, apc->mac_addr);
1733 int mana_alloc_queues(struct net_device *ndev)
1735 struct mana_port_context *apc = netdev_priv(ndev);
1738 err = mana_create_vport(apc, ndev);
1742 err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
1746 err = mana_add_rx_queues(apc, ndev);
1750 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
1752 err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
1756 mana_rss_table_init(apc);
1758 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
1762 mana_chn_setxdp(apc, mana_xdp_get(apc));
1767 mana_destroy_vport(apc);
1771 int mana_attach(struct net_device *ndev)
1773 struct mana_port_context *apc = netdev_priv(ndev);
1778 err = mana_init_port(ndev);
1782 if (apc->port_st_save) {
1783 err = mana_alloc_queues(ndev);
1785 mana_cleanup_port_context(apc);
1790 apc->port_is_up = apc->port_st_save;
1792 /* Ensure port state updated before txq state */
1795 if (apc->port_is_up)
1796 netif_carrier_on(ndev);
1798 netif_device_attach(ndev);
1803 static int mana_dealloc_queues(struct net_device *ndev)
1805 struct mana_port_context *apc = netdev_priv(ndev);
1806 struct mana_txq *txq;
1809 if (apc->port_is_up)
1812 mana_chn_setxdp(apc, NULL);
1814 /* No packet can be transmitted now since apc->port_is_up is false.
1815 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
1816 * a txq because it may not see apc->port_is_up being cleared to
1817 * false in time, but that is harmless since mana_start_xmit() drops
1818 * any new packets due to apc->port_is_up being false.
1820 * Drain all the in-flight TX packets
1822 for (i = 0; i < apc->num_queues; i++) {
1823 txq = &apc->tx_qp[i].txq;
1825 while (atomic_read(&txq->pending_sends) > 0)
1826 usleep_range(1000, 2000);
1829 /* At this point the queues can no longer be woken up, because
1830 * mana_poll_tx_cq() is guaranteed not to be running anymore.
1833 apc->rss_state = TRI_STATE_FALSE;
1834 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
1836 netdev_err(ndev, "Failed to disable vPort: %d\n", err);
1840 mana_destroy_vport(apc);
1845 int mana_detach(struct net_device *ndev, bool from_close)
1847 struct mana_port_context *apc = netdev_priv(ndev);
1852 apc->port_st_save = apc->port_is_up;
1853 apc->port_is_up = false;
1855 /* Ensure port state updated before txq state */
1858 netif_tx_disable(ndev);
1859 netif_carrier_off(ndev);
1861 if (apc->port_st_save) {
1862 err = mana_dealloc_queues(ndev);
1868 netif_device_detach(ndev);
1869 mana_cleanup_port_context(apc);
1875 static int mana_probe_port(struct mana_context *ac, int port_idx,
1876 struct net_device **ndev_storage)
1878 struct gdma_context *gc = ac->gdma_dev->gdma_context;
1879 struct mana_port_context *apc;
1880 struct net_device *ndev;
1883 ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
1884 gc->max_num_queues);
1888 *ndev_storage = ndev;
1890 apc = netdev_priv(ndev);
1893 apc->max_queues = gc->max_num_queues;
1894 apc->num_queues = gc->max_num_queues;
1895 apc->port_handle = INVALID_MANA_HANDLE;
1896 apc->port_idx = port_idx;
1898 ndev->netdev_ops = &mana_devops;
1899 ndev->ethtool_ops = &mana_ethtool_ops;
1900 ndev->mtu = ETH_DATA_LEN;
1901 ndev->max_mtu = ndev->mtu;
1902 ndev->min_mtu = ndev->mtu;
1903 ndev->needed_headroom = MANA_HEADROOM;
1904 SET_NETDEV_DEV(ndev, gc->dev);
1906 netif_carrier_off(ndev);
1908 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
1910 err = mana_init_port(ndev);
1914 netdev_lockdep_set_classes(ndev);
1916 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1917 ndev->hw_features |= NETIF_F_RXCSUM;
1918 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1919 ndev->hw_features |= NETIF_F_RXHASH;
1920 ndev->features = ndev->hw_features;
1921 ndev->vlan_features = 0;
1923 err = register_netdev(ndev);
1925 netdev_err(ndev, "Unable to register netdev.\n");
1935 *ndev_storage = NULL;
1936 netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
1941 int mana_probe(struct gdma_dev *gd, bool resuming)
1943 struct gdma_context *gc = gd->gdma_context;
1944 struct mana_context *ac = gd->driver_data;
1945 struct device *dev = gc->dev;
1951 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
1952 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
1954 err = mana_gd_register_device(gd);
1959 ac = kzalloc(sizeof(*ac), GFP_KERNEL);
1964 gd->driver_data = ac;
1967 err = mana_create_eq(ac);
1971 err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
1972 MANA_MICRO_VERSION, &num_ports);
1977 ac->num_ports = num_ports;
1979 if (ac->num_ports != num_ports) {
1980 dev_err(dev, "The number of vPorts changed: %d->%d\n",
1981 ac->num_ports, num_ports);
1987 if (ac->num_ports == 0)
1988 dev_err(dev, "Failed to detect any vPort\n");
1990 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
1991 ac->num_ports = MAX_PORTS_IN_MANA_DEV;
1994 for (i = 0; i < ac->num_ports; i++) {
1995 err = mana_probe_port(ac, i, &ac->ports[i]);
2000 for (i = 0; i < ac->num_ports; i++) {
2002 err = mana_attach(ac->ports[i]);
2010 mana_remove(gd, false);
2015 void mana_remove(struct gdma_dev *gd, bool suspending)
2017 struct gdma_context *gc = gd->gdma_context;
2018 struct mana_context *ac = gd->driver_data;
2019 struct device *dev = gc->dev;
2020 struct net_device *ndev;
2024 for (i = 0; i < ac->num_ports; i++) {
2025 ndev = ac->ports[i];
2028 dev_err(dev, "No net device to remove\n");
2032 /* All cleanup actions should stay after rtnl_lock(), otherwise
2033 * other functions may access partially cleaned up data.
2037 err = mana_detach(ndev, false);
2039 netdev_err(ndev, "Failed to detach vPort %d: %d\n",
2043 /* No need to unregister the ndev. */
2048 unregister_netdevice(ndev);
2055 mana_destroy_eq(ac);
2058 mana_gd_deregister_device(gd);
2063 gd->driver_data = NULL;
2064 gd->gdma_context = NULL;