// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/dma-mapping.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <uapi/linux/udp.h>

#include "funeth_ktls.h"
#include "funeth_txrx.h"
#include "funeth_trace.h"
#include "fun_queue.h"
#define FUN_XDP_CLEAN_THRES 32
#define FUN_XDP_CLEAN_BATCH 16

/* DMA-map a packet and return the (length, DMA_address) pairs for its
 * segments. If a mapping error occurs -ENOMEM is returned.
 */
static int map_skb(const struct sk_buff *skb, struct device *dev,
		   dma_addr_t *addr, unsigned int *len)
{
	const struct skb_shared_info *si;
	const skb_frag_t *fp, *end;

	*len = skb_headlen(skb);
	*addr = dma_map_single(dev, skb->data, *len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++len = skb_frag_size(fp);
		*++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}

/* Return the address just past the end of a Tx queue's descriptor ring.
 * It exploits the fact that the HW writeback area is just after the end
 * of the descriptor ring.
 */
static void *txq_end(const struct funeth_txq *q)
{
	return (void *)q->hw_wb;
}

/* Return the amount of space within a Tx ring from the given address to the
 * end.
 */
static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
{
	return txq_end(q) - p;
}

/* Return the number of Tx descriptors occupied by a Tx request. */
static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
{
	return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
}
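
/* Return, in network byte order, the 16-bit word of a TCP header that holds
 * the data offset and flag bits, as consumed by fun_eth_offload_init().
 */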
static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
{
	return *(__be16 *)&tcp_flag_word(th);
}
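
/* Prepare an skb for inline TLS. If the connection's HW crypto state is in
 * sync with the packet's TCP sequence number the skb is passed through and
 * *tls_len is set so a TLS descriptor is added later. Otherwise a resync is
 * requested if appropriate and the skb falls back to SW encryption.
 */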
static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
				  unsigned int *tls_len)
{
#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct fun_ktls_tx_ctx *tls_ctx;
	u32 datalen, seq;

	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return skb;

	if (likely(!tls_offload_tx_resync_pending(skb->sk))) {
		seq = ntohl(tcp_hdr(skb)->seq);
		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);

		if (likely(tls_ctx->next_seq == seq)) {
			*tls_len = datalen;
			return skb;
		}
		if (seq - tls_ctx->next_seq < U32_MAX / 4) {
			tls_offload_tx_resync_request(skb->sk, seq,
						      tls_ctx->next_seq);
		}
	}

	FUN_QSTAT_INC(q, tx_tls_fallback);
	skb = tls_encrypt_skb(skb);
	if (!skb)
		FUN_QSTAT_INC(q, tx_tls_drops);

	return skb;
#else
	return NULL;
#endif
}

/* Write as many descriptors as needed for the supplied skb starting at the
 * current producer location. The caller has made certain enough descriptors
 * are available.
 *
 * Returns the number of descriptors written, 0 on error.
 */
static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
				   unsigned int tls_len)
{
	unsigned int extra_bytes = 0, extra_pkts = 0;
	unsigned int idx = q->prod_cnt & q->mask;
	const struct skb_shared_info *shinfo;
	unsigned int lens[MAX_SKB_FRAGS + 1];
	dma_addr_t addrs[MAX_SKB_FRAGS + 1];
	struct fun_eth_tx_req *req;
	struct fun_dataop_gl *gle;
	const struct tcphdr *th;
	unsigned int ngle, i;
	unsigned int l4_hlen;
	u16 flags;

	if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return 0;
	}

	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = 0;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;

	shinfo = skb_shinfo(skb);
	if (likely(shinfo->gso_size)) {
		if (skb->encapsulation) {
			u16 ol4_ofst;

			flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_OUTER_L3_LEN;
			if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |
						SKB_GSO_UDP_TUNNEL_CSUM)) {
				flags |= FUN_ETH_UPDATE_OUTER_L4_LEN |
					 FUN_ETH_OUTER_UDP;
				if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
					flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM;
				ol4_ofst = skb_transport_offset(skb);
			} else {
				ol4_ofst = skb_inner_network_offset(skb);
			}

			if (ip_hdr(skb)->version == 4)
				flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM;
			else
				flags |= FUN_ETH_OUTER_IPV6;

			if (skb->inner_network_header) {
				if (inner_ip_hdr(skb)->version == 4)
					flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
				else
					flags |= FUN_ETH_INNER_IPV6 |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
			}
			th = inner_tcp_hdr(skb);
			l4_hlen = __tcp_hdrlen(th);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_inner_network_offset(skb),
					     skb_inner_transport_offset(skb),
					     skb_network_offset(skb), ol4_ofst);
			FUN_QSTAT_INC(q, tx_encap_tso);
		} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
			flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_INNER_L4_LEN |
				FUN_ETH_UPDATE_INNER_L3_LEN;

			if (ip_hdr(skb)->version == 4)
				flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
			else
				flags |= FUN_ETH_INNER_IPV6;

			l4_hlen = sizeof(struct udphdr);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     cpu_to_be16(l4_hlen << 10), 0,
					     skb_network_offset(skb),
					     skb_transport_offset(skb), 0, 0);
			FUN_QSTAT_INC(q, tx_uso);
		} else {
			/* HW considers one set of headers as inner */
			flags = FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_INNER_L3_LEN;
			if (shinfo->gso_type & SKB_GSO_TCPV6)
				flags |= FUN_ETH_INNER_IPV6;
			else
				flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
			th = tcp_hdr(skb);
			l4_hlen = __tcp_hdrlen(th);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_network_offset(skb),
					     skb_transport_offset(skb), 0, 0);
			FUN_QSTAT_INC(q, tx_tso);
		}

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_cso += shinfo->gso_segs;
		u64_stats_update_end(&q->syncp);

		extra_pkts = shinfo->gso_segs - 1;
		extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
			       l4_hlen) * extra_pkts;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
		if (skb->csum_offset == offsetof(struct udphdr, check))
			flags |= FUN_ETH_INNER_UDP;
		fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0,
				     skb_checksum_start_offset(skb), 0, 0);
		FUN_QSTAT_INC(q, tx_cso);
	} else {
		fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	}
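
	/* Write the gather list right after the request header.  If it would
	 * run past the end of the descriptor ring it wraps around to the
	 * start of the ring.
	 */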
	ngle = shinfo->nr_frags + 1;
	req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
	req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);

	for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
	     i < ngle && txq_to_end(q, gle); i++, gle++)
		fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);

	if (txq_to_end(q, gle) == 0) {
		gle = (struct fun_dataop_gl *)q->desc;
		for ( ; i < ngle; i++, gle++)
			fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
	}
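
	/* For inline TLS a fun_eth_tls element carrying the connection's HW
	 * TLS id follows the gather list and the request is flagged
	 * FUN_ETH_TX_TLS.
	 */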
	if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
		struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
		struct fun_ktls_tx_ctx *tls_ctx;

		req->len8 += FUNETH_TLS_SZ / 8;
		req->flags = cpu_to_be16(FUN_ETH_TX_TLS);

		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
		tls->tlsid = tls_ctx->tlsid;
		tls_ctx->next_seq += tls_len;

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_tls_bytes += tls_len;
		q->stats.tx_tls_pkts += 1 + extra_pkts;
		u64_stats_update_end(&q->syncp);
	}

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += skb->len + extra_bytes;
	q->stats.tx_pkts += 1 + extra_pkts;
	u64_stats_update_end(&q->syncp);

	q->info[idx].skb = skb;

	trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
	return tx_req_ndesc(req);
}

/* Return the number of available descriptors of a Tx queue.
 * HW assumes head==tail means the ring is empty so we need to keep one
 * descriptor unused.
 */
static unsigned int fun_txq_avail(const struct funeth_txq *q)
{
	return q->mask - q->prod_cnt + q->cons_cnt;
}

/* Stop a queue if it can't handle another worst-case packet. */
static void fun_tx_check_stop(struct funeth_txq *q)
{
	if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
		return;

	netif_tx_stop_queue(q->ndq);

	/* NAPI reclaim is freeing packets in parallel with us and we may race.
	 * We have stopped the queue but check again after synchronizing with
	 * reclaim.
	 */
	smp_mb();
	if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
		FUN_QSTAT_INC(q, tx_nstops);
	else
		netif_tx_start_queue(q->ndq);
}

/* Return true if a queue has enough space to restart. Current condition is
 * that the queue must be >= 1/4 empty.
 */
static bool fun_txq_may_restart(struct funeth_txq *q)
{
	return fun_txq_avail(q) >= q->mask / 4;
}
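
/* The netdev ndo_start_xmit handler.  Writes the packet's descriptors, rings
 * the doorbell unless more packets are pending (xmit_more), and stops the
 * queue if it may not have room for another worst-case packet.
 */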
netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int qid = skb_get_queue_mapping(skb);
	struct funeth_txq *q = fp->txqs[qid];
	unsigned int tls_len = 0;
	unsigned int ndesc;

	if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
	    tls_is_sk_tx_device_offloaded(skb->sk)) {
		skb = fun_tls_tx(skb, q, &tls_len);
		if (unlikely(!skb))
			goto dropped;
	}

	ndesc = write_pkt_desc(skb, q, tls_len);
	if (unlikely(!ndesc)) {
		dev_kfree_skb_any(skb);
		goto dropped;
	}

	q->prod_cnt += ndesc;
	fun_tx_check_stop(q);

	skb_tx_timestamp(skb);

	if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
		fun_txq_wr_db(q);
	else
		FUN_QSTAT_INC(q, tx_more);

	return NETDEV_TX_OK;

dropped:
	/* A dropped packet may be the last one in a xmit_more train,
	 * ring the doorbell just in case.
	 */
	if (!netdev_xmit_more())
		fun_txq_wr_db(q);
	return NETDEV_TX_OK;
}

/* Return a Tx queue's HW head index written back to host memory. */
static u16 txq_hw_head(const struct funeth_txq *q)
{
	return (u16)be64_to_cpu(*q->hw_wb);
}

/* Unmap the Tx packet starting at the given descriptor index and
 * return the number of Tx descriptors it occupied.
 */
static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
{
	const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
	unsigned int ngle = req->dataop.ngather;
	struct fun_dataop_gl *gle;

	if (ngle) {
		gle = (struct fun_dataop_gl *)req->dataop.imm;
		dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
				 be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);

		for (gle++; --ngle && txq_to_end(q, gle); gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);

		/* gather entries that wrapped to the start of the ring */
		for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);
	}

	return tx_req_ndesc(req);
}

/* Reclaim completed Tx descriptors and free their packets. Restart a stopped
 * queue if we freed enough descriptors.
 *
 * Return true if we exhausted the budget while there is more work to be done.
 */
static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
{
	unsigned int npkts = 0, nbytes = 0, ndesc = 0;
	unsigned int head, limit, reclaim_idx;

	/* budget may be 0, e.g., netpoll */
	limit = budget ? budget : UINT_MAX;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
			struct sk_buff *skb = q->info[reclaim_idx].skb;

			trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);

			nbytes += skb->len;
			napi_consume_skb(skb, budget);
			ndesc += pkt_desc;
			reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
			npkts++;
		} while (reclaim_idx != head && npkts < limit);
	}

	q->cons_cnt += ndesc;
	netdev_tx_completed_queue(q->ndq, npkts, nbytes);
	smp_mb(); /* pairs with the one in fun_tx_check_stop() */

	if (unlikely(netif_tx_queue_stopped(q->ndq) &&
		     fun_txq_may_restart(q))) {
		netif_tx_wake_queue(q->ndq);
		FUN_QSTAT_INC(q, tx_nrestarts);
	}

	return reclaim_idx != head;
}

/* The NAPI handler for Tx queues. */
int fun_txq_napi_poll(struct napi_struct *napi, int budget)
{
	struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
	struct funeth_txq *q = irq->txq;
	unsigned int db_val;

	if (fun_txq_reclaim(q, budget))
		return budget;               /* exhausted budget */

	napi_complete(napi);                 /* exhausted pending work */
	db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
	writel(db_val, q->db);
	return 0;
}

/* Reclaim up to @budget completed Tx packets from a TX XDP queue. */
static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
{
	unsigned int npkts = 0, ndesc = 0, head, reclaim_idx;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);

			xdp_return_frame(q->info[reclaim_idx].xdpf);

			trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);

			reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
			ndesc += pkt_desc;
			npkts++;
		} while (reclaim_idx != head && npkts < budget);
	}

	q->cons_cnt += ndesc;
	return npkts;
}
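
/* Enqueue one XDP frame on an XDP Tx queue.  Returns true if the frame was
 * queued and false if the queue is full or DMA mapping fails, in which case
 * the caller retains ownership of the frame.  Ringing the doorbell is left
 * to the caller.
 */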
bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
{
	struct fun_eth_tx_req *req;
	struct fun_dataop_gl *gle;
	unsigned int idx, len;
	dma_addr_t dma;

	if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
		fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);

	if (unlikely(!fun_txq_avail(q))) {
		FUN_QSTAT_INC(q, tx_xdp_full);
		return false;
	}

	len = xdpf->len;
	dma = dma_map_single(q->dma_dev, xdpf->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return false;
	}

	idx = q->prod_cnt & q->mask;
	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = (sizeof(*req) + sizeof(*gle)) / 8;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;
	fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	req->dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);

	gle = (struct fun_dataop_gl *)req->dataop.imm;
	fun_dataop_gl_init(gle, 0, 0, len, dma);

	q->info[idx].xdpf = xdpf;

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += len;
	q->stats.tx_pkts++;
	u64_stats_update_end(&q->syncp);

	trace_funeth_tx(q, len, idx, 1);
	q->prod_cnt++;

	return true;
}
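
/* The ndo_xdp_xmit handler.  Transmits up to @n frames on the XDP Tx queue
 * assigned to the current CPU and returns the number of frames queued.
 */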
int fun_xdp_xmit_frames(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q, **xdpqs;
	int i, q_idx;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdpqs = rcu_dereference_bh(fp->xdpqs);
	if (unlikely(!xdpqs))
		return -ENETDOWN;

	q_idx = smp_processor_id();
	if (unlikely(q_idx >= fp->num_xdpqs))
		return -ENXIO;

	for (q = xdpqs[q_idx], i = 0; i < n; i++)
		if (!fun_xdp_tx(q, frames[i]))
			break;

	if (unlikely(flags & XDP_XMIT_FLUSH))
		fun_txq_wr_db(q);
	return i;
}

/* Purge a Tx queue of any queued packets. Should be called once HW access
 * to the packets has been revoked, e.g., after the queue has been disabled.
 */
static void fun_txq_purge(struct funeth_txq *q)
{
	while (q->cons_cnt != q->prod_cnt) {
		unsigned int idx = q->cons_cnt & q->mask;

		q->cons_cnt += fun_unmap_pkt(q, idx);
		dev_kfree_skb_any(q->info[idx].skb);
	}
	netdev_tx_reset_queue(q->ndq);
}
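
/* As fun_txq_purge() but for XDP Tx queues, which hold xdp_frames instead of
 * skbs.
 */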
static void fun_xdpq_purge(struct funeth_txq *q)
{
	while (q->cons_cnt != q->prod_cnt) {
		unsigned int idx = q->cons_cnt & q->mask;

		q->cons_cnt += fun_unmap_pkt(q, idx);
		xdp_return_frame(q->info[idx].xdpf);
	}
}

/* Create a Tx queue, allocating all the host resources needed. */
static struct funeth_txq *fun_txq_create_sw(struct net_device *dev,
					    unsigned int qidx,
					    unsigned int ndesc,
					    struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q;
	int numa_node;

	if (irq)
		numa_node = fun_irq_node(irq); /* skb Tx queue */
	else
		numa_node = cpu_to_node(qidx); /* XDP Tx queue */

	q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
	if (!q)
		goto err;

	q->dma_dev = &fp->pdev->dev;
	q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
				     sizeof(*q->info), true, numa_node,
				     &q->dma_addr, (void **)&q->info,
				     &q->hw_wb);
	if (!q->desc)
		goto free_q;

	q->netdev = dev;
	q->mask = ndesc - 1;
	q->qidx = qidx;
	q->numa_node = numa_node;
	u64_stats_init(&q->syncp);
	q->init_state = FUN_QSTATE_INIT_SW;
	return q;

free_q:
	kfree(q);
err:
	netdev_err(dev, "Can't allocate memory for %s queue %u\n",
		   irq ? "Tx" : "XDP", qidx);
	return NULL;
}
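
/* Free a Tx queue's host resources and fold its counters into the netdev
 * totals.
 */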
static void fun_txq_free_sw(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
			  q->desc, q->dma_addr, q->info);

	fp->tx_packets += q->stats.tx_pkts;
	fp->tx_bytes   += q->stats.tx_bytes;
	fp->tx_dropped += q->stats.tx_map_err;

	kfree(q);
}

/* Allocate the device portion of a Tx queue. */
int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	unsigned int irq_idx, ndesc = q->mask + 1;
	int err;

	q->irq = irq;
	*q->hw_wb = 0;
	q->prod_cnt = 0;
	q->cons_cnt = 0;
	irq_idx = irq ? irq->irq_idx : 0;

	err = fun_sq_create(fp->fdev,
			    FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS |
			    FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0,
			    FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc,
			    q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
			    irq_idx, 0, fp->fdev->kern_end_qid, 0,
			    &q->hw_qid, &q->db);
	if (err)
		goto out;

	err = fun_create_and_bind_tx(fp, q->hw_qid);
	if (err < 0)
		goto free_devq;
	q->ethid = err;

	if (irq) {
		irq->txq = q;
		q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
		q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
					      fp->tx_coal_count);
		writel(q->irq_db_val, q->db);
	}

	q->init_state = FUN_QSTATE_INIT_FULL;
	netif_info(fp, ifup, q->netdev,
		   "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n",
		   irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
		   q->ethid, q->numa_node);
	return 0;

free_devq:
	fun_destroy_sq(fp->fdev, q->hw_qid);
out:
	netdev_err(q->netdev,
		   "Failed to create %s queue %u on device, error %d\n",
		   irq ? "Tx" : "XDP", q->qidx, err);
	return err;
}
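
/* Free the device resources of a Tx queue, returning it to its INIT_SW
 * state.
 */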
static void fun_txq_free_dev(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	if (q->init_state < FUN_QSTATE_INIT_FULL)
		return;

	netif_info(fp, ifdown, q->netdev,
		   "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n",
		   q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
		   q->irq ? q->irq->irq_idx : 0, q->ethid);

	fun_destroy_sq(fp->fdev, q->hw_qid);
	fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);

	if (q->irq) {
		q->irq->txq = NULL;
		fun_txq_purge(q);
	} else {
		fun_xdpq_purge(q);
	}

	q->init_state = FUN_QSTATE_INIT_SW;
}

/* Create or advance a Tx queue, allocating all the host and device resources
 * needed to reach the target state.
 */
int funeth_txq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ndesc, struct fun_irq *irq, int state,
		      struct funeth_txq **qp)
{
	struct funeth_txq *q = *qp;
	int err;

	if (!q)
		q = fun_txq_create_sw(dev, qidx, ndesc, irq);
	if (!q)
		return -ENOMEM;

	if (q->init_state >= state)
		goto out;

	err = fun_txq_create_dev(q, irq);
	if (err) {
		if (!*qp)
			fun_txq_free_sw(q);
		return err;
	}

out:
	*qp = q;
	return 0;
}

/* Free Tx queue resources until it reaches the target state.
 * The queue must be already disconnected from the stack.
 */
struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
{
	if (state < FUN_QSTATE_INIT_FULL)
		fun_txq_free_dev(q);

	if (state == FUN_QSTATE_DESTROYED) {