diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 4a9041e..bc4d8d1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1834,8 +1834,13 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
                                 struct netdev_queue *nq, bool napi)
 {
        unsigned int bytes_compl = 0, pkts_compl = 0;
+       struct xdp_frame_bulk bq;
        int i;
 
+       xdp_frame_bulk_init(&bq);
+
+       rcu_read_lock(); /* needed for xdp_return_frame_bulk */
+
        for (i = 0; i < num; i++) {
                struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
                struct mvneta_tx_desc *tx_desc = txq->descs +
@@ -1857,9 +1862,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
                        if (napi && buf->type == MVNETA_TYPE_XDP_TX)
                                xdp_return_frame_rx_napi(buf->xdpf);
                        else
-                               xdp_return_frame(buf->xdpf);
+                               xdp_return_frame_bulk(buf->xdpf, &bq);
                }
        }
+       xdp_flush_frame_bulk(&bq);
+
+       rcu_read_unlock();
 
        netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
 }
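
The hunk above converts the TX completion path from per-frame xdp_return_frame() calls to the bulk-return API: frames headed back to the same memory allocator are queued in an on-stack xdp_frame_bulk and released in batches, and rcu_read_lock() is taken because xdp_return_frame_bulk() resolves the frame's memory model under RCU. A minimal sketch of the pattern in isolation (example_complete_tx and the frame array are hypothetical; the xdp_* calls are the real <net/xdp.h> API):

#include <net/xdp.h>

static void example_complete_tx(struct xdp_frame **frames, int num)
{
        struct xdp_frame_bulk bq;
        int i;

        xdp_frame_bulk_init(&bq);

        rcu_read_lock(); /* xdp_return_frame_bulk() looks up mem info under RCU */

        for (i = 0; i < num; i++)
                xdp_return_frame_bulk(frames[i], &bq);

        /* release whatever is still queued in the bulk structure */
        xdp_flush_frame_bulk(&bq);

        rcu_read_unlock();
}
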
@@ -2025,16 +2033,16 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
 
 static void
 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
-                   struct xdp_buff *xdp, int sync_len, bool napi)
+                   struct xdp_buff *xdp, struct skb_shared_info *sinfo,
+                   int sync_len)
 {
-       struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
        int i;
 
        for (i = 0; i < sinfo->nr_frags; i++)
                page_pool_put_full_page(rxq->page_pool,
-                                       skb_frag_page(&sinfo->frags[i]), napi);
+                                       skb_frag_page(&sinfo->frags[i]), true);
        page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
-                          sync_len, napi);
+                          sync_len, true);
 }
 
 static int
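
In the hunk above, mvneta_xdp_put_buff() stops fetching the shared_info itself and takes it from the caller, and the napi flag disappears because every remaining call site runs in NAPI context (hence the hard-coded true handed to the page_pool helpers). Hoisting the lookup matters because the shared_info lives on the last cache line of the buffer; a hedged sketch of what xdp_get_shared_info_from_buff() resolves to at the time of this change (example_shared_info is a hypothetical stand-in):

static inline struct skb_shared_info *
example_shared_info(struct xdp_buff *xdp)
{
        /* shared_info sits at the tail of the frame, after headroom,
         * data and tailroom: a potentially cold cache line */
        return (struct skb_shared_info *)(xdp->data_hard_start +
                                          xdp->frame_sz -
                                          sizeof(struct skb_shared_info));
}
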
@@ -2171,6 +2179,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
               struct bpf_prog *prog, struct xdp_buff *xdp,
               u32 frame_sz, struct mvneta_stats *stats)
 {
+       struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
        unsigned int len, data_len, sync;
        u32 ret, act;
 
@@ -2191,7 +2200,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 
                err = xdp_do_redirect(pp->dev, xdp, prog);
                if (unlikely(err)) {
-                       mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+                       mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
                        ret = MVNETA_XDP_DROPPED;
                } else {
                        ret = MVNETA_XDP_REDIR;
@@ -2202,7 +2211,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
        case XDP_TX:
                ret = mvneta_xdp_xmit_back(pp, xdp);
                if (ret != MVNETA_XDP_TX)
-                       mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+                       mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -2211,7 +2220,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                trace_xdp_exception(pp->dev, prog, act);
                fallthrough;
        case XDP_DROP:
-               mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+               mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
                ret = MVNETA_XDP_DROPPED;
                stats->xdp_drop++;
                break;
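
mvneta_run_xdp() now takes the shared_info pointer once, before the BPF program runs, and every verdict branch that has to recycle the buffer reuses it. A condensed, hypothetical sketch of the resulting control flow (XDP_TX omitted for brevity; example_put_buff stands in for mvneta_xdp_put_buff, while the bpf_* and xdp_* helpers are the real kernel APIs of this era, including the one-argument bpf_warn_invalid_xdp_action()):

static void example_put_buff(struct xdp_buff *xdp,
                             struct skb_shared_info *sinfo);

static u32 example_run_xdp(struct net_device *dev, struct bpf_prog *prog,
                           struct xdp_buff *xdp)
{
        /* fetched once, reused on every recycle path below */
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
        u32 act = bpf_prog_run_xdp(prog, xdp);

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_REDIRECT:
                if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
                        example_put_buff(xdp, sinfo);
                        act = XDP_DROP;
                }
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(dev, prog, act);
                fallthrough;
        case XDP_DROP:
                example_put_buff(xdp, sinfo);
                break;
        }

        return act;
}
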
@@ -2269,9 +2278,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc,
                            struct mvneta_rx_queue *rxq,
                            struct xdp_buff *xdp, int *size,
+                           struct skb_shared_info *xdp_sinfo,
                            struct page *page)
 {
-       struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
        struct net_device *dev = pp->dev;
        enum dma_data_direction dma_dir;
        int data_len, len;
@@ -2289,13 +2298,22 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
                                len, dma_dir);
        rx_desc->buf_phys_addr = 0;
 
-       if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
-               skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
+       if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
+               skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
 
                skb_frag_off_set(frag, pp->rx_offset_correction);
                skb_frag_size_set(frag, data_len);
                __skb_frag_set_page(frag, page);
-               sinfo->nr_frags++;
+
+               /* last fragment */
+               if (len == *size) {
+                       struct skb_shared_info *sinfo;
+
+                       sinfo = xdp_get_shared_info_from_buff(xdp);
+                       sinfo->nr_frags = xdp_sinfo->nr_frags;
+                       memcpy(sinfo->frags, xdp_sinfo->frags,
+                              sinfo->nr_frags * sizeof(skb_frag_t));
+               }
        } else {
                page_pool_put_full_page(rxq->page_pool, page, true);
        }
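
mvneta_swbm_add_rx_fragment() now fills a caller-owned, on-stack skb_shared_info (xdp_sinfo) while the frame's descriptors are being consumed, and copies the accumulated fragments into the xdp_buff's real shared_info only when the last fragment arrives (len == *size). Per-fragment writes thus stay on the stack, and the buffer's tail cache line is touched once per frame instead of once per fragment. A hypothetical sketch of the same deferred-copy pattern:

static void example_add_frag(struct xdp_buff *xdp,
                             struct skb_shared_info *local_sinfo,
                             struct page *page, unsigned int off,
                             unsigned int size, bool last_frag)
{
        skb_frag_t *frag = &local_sinfo->frags[local_sinfo->nr_frags++];

        /* per-fragment state goes to the on-stack copy only */
        skb_frag_off_set(frag, off);
        skb_frag_size_set(frag, size);
        __skb_frag_set_page(frag, page);

        if (last_frag) {
                struct skb_shared_info *sinfo;

                /* one write-back per frame into the buffer's tail */
                sinfo = xdp_get_shared_info_from_buff(xdp);
                sinfo->nr_frags = local_sinfo->nr_frags;
                memcpy(sinfo->frags, local_sinfo->frags,
                       sinfo->nr_frags * sizeof(skb_frag_t));
        }
}
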
@@ -2339,13 +2357,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 {
        int rx_proc = 0, rx_todo, refill, size = 0;
        struct net_device *dev = pp->dev;
-       struct xdp_buff xdp_buf = {
-               .frame_sz = PAGE_SIZE,
-               .rxq = &rxq->xdp_rxq,
-       };
+       struct skb_shared_info sinfo;
        struct mvneta_stats ps = {};
        struct bpf_prog *xdp_prog;
        u32 desc_status, frame_sz;
+       struct xdp_buff xdp_buf;
+
+       xdp_buf.data_hard_start = NULL;
+       xdp_buf.frame_sz = PAGE_SIZE;
+       xdp_buf.rxq = &rxq->xdp_rxq;
+
+       sinfo.nr_frags = 0;
 
        /* Get number of received packets */
        rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
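
Swapping the designated initializer for explicit assignments is not purely cosmetic: in C, a designated initializer zero-fills every member that is not named, so the old form implied a full memset of the xdp_buff on each NAPI poll, while the new form writes only the fields the RX loop actually uses. A side-by-side sketch (example_init is hypothetical; mvneta_rx_queue is the real driver type):

static void example_init(struct mvneta_rx_queue *rxq)
{
        /* old form: every member not named is zero-initialized */
        struct xdp_buff a = {
                .frame_sz = PAGE_SIZE,
                .rxq = &rxq->xdp_rxq,
        };

        /* new form: only three fields are written; data_hard_start
         * doubles as the "frame in flight" marker for the loop */
        struct xdp_buff b;

        b.data_hard_start = NULL;
        b.frame_sz = PAGE_SIZE;
        b.rxq = &rxq->xdp_rxq;

        (void)a;
        (void)b;
}
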
@@ -2385,11 +2407,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                                rx_desc->buf_phys_addr = 0;
                                page_pool_put_full_page(rxq->page_pool, page,
                                                        true);
-                               continue;
+                               goto next;
                        }
 
                        mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
-                                                   &size, page);
+                                                   &size, &sinfo, page);
                } /* Middle or Last descriptor */
 
                if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2397,7 +2419,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                        continue;
 
                if (size) {
-                       mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+                       mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
                        goto next;
                }
 
@@ -2409,7 +2431,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                if (IS_ERR(skb)) {
                        struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
-                       mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+                       mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 
                        u64_stats_update_begin(&stats->syncp);
                        stats->es.skb_alloc_error++;
@@ -2426,11 +2448,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                napi_gro_receive(napi, skb);
 next:
                xdp_buf.data_hard_start = NULL;
+               sinfo.nr_frags = 0;
        }
        rcu_read_unlock();
 
        if (xdp_buf.data_hard_start)
-               mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+               mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 
        if (ps.xdp_redirect)
                xdp_do_flush_map();
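
The remaining rx_swbm hunks follow from the changes above: every mvneta_xdp_put_buff() call site passes the on-stack sinfo, the early-drop path jumps to next: so the fragment counter is reset together with data_hard_start, and a frame still in flight when the poll ends is recycled with all its fragments. A condensed, hypothetical skeleton of that reset discipline (every example_* name is a stand-in):

struct example_ctx;     /* hypothetical driver context */

static bool example_consume_desc(struct example_ctx *ctx, struct xdp_buff *xdp,
                                 struct skb_shared_info *sinfo);
static int example_deliver(struct example_ctx *ctx, struct xdp_buff *xdp,
                           struct skb_shared_info *sinfo);
static void example_put_buff(struct example_ctx *ctx, struct xdp_buff *xdp,
                             struct skb_shared_info *sinfo);

static void example_rx_poll(struct example_ctx *ctx, int budget)
{
        struct skb_shared_info sinfo;
        struct xdp_buff xdp_buf;
        int i;

        xdp_buf.data_hard_start = NULL; /* no frame in flight yet */
        sinfo.nr_frags = 0;

        for (i = 0; i < budget; i++) {
                /* returns false for first/middle descriptors of a frame */
                if (!example_consume_desc(ctx, &xdp_buf, &sinfo))
                        continue;

                if (example_deliver(ctx, &xdp_buf, &sinfo) < 0)
                        example_put_buff(ctx, &xdp_buf, &sinfo);

                /* frame consumed or dropped: reset both trackers */
                xdp_buf.data_hard_start = NULL;
                sinfo.nr_frags = 0;
        }

        /* budget exhausted mid-frame: recycle the partial frame */
        if (xdp_buf.data_hard_start)
                example_put_buff(ctx, &xdp_buf, &sinfo);
}
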
@@ -3220,7 +3243,7 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
                return err;
        }
 
-       err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
+       err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
        if (err < 0)
                goto err_free_pp;
 
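xdp_rxq_info_reg() grew a fourth parameter in this window: the NAPI instance id, which AF_XDP busy polling uses to find the right NAPI context from the socket layer. mvneta passes 0 here; a driver with a per-queue NAPI instance would typically pass napi->napi_id, roughly like this (example_reg_rxq is hypothetical; the xdp_rxq_info_reg() signature is the real one):

static int example_reg_rxq(struct net_device *dev, struct napi_struct *napi,
                           struct xdp_rxq_info *xdp_rxq, u32 queue_id)
{
        /* 4th argument: NAPI id consumed by AF_XDP busy polling;
         * 0 means no NAPI id is associated with this rxq */
        return xdp_rxq_info_reg(xdp_rxq, dev, queue_id, napi->napi_id);
}
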
@@ -4409,7 +4432,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
        struct bpf_prog *old_prog;
 
        if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
-               NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+               NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
                return -EOPNOTSUPP;
        }
 
@@ -5232,7 +5255,7 @@ static int mvneta_probe(struct platform_device *pdev)
        err = mvneta_port_power_up(pp, pp->phy_interface);
        if (err < 0) {
                dev_err(&pdev->dev, "can't power up port\n");
-               return err;
+               goto err_netdev;
        }
 
        /* Armada3700 network controller does not support per-cpu
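
The final hunk fixes an error-path leak in mvneta_probe(): by the time mvneta_port_power_up() can fail, the netdev and earlier resources are already set up, so a bare return skips the unwind performed under the existing err_netdev label. The generic shape of the fix (all example_* names hypothetical):

static int example_alloc(struct platform_device *pdev);
static int example_power_up(struct platform_device *pdev);
static void example_free(struct platform_device *pdev);

static int example_probe(struct platform_device *pdev)
{
        int err;

        err = example_alloc(pdev);
        if (err)
                return err;     /* nothing to unwind yet */

        err = example_power_up(pdev);
        if (err) {
                dev_err(&pdev->dev, "can't power up port\n");
                goto err_free;  /* unwind instead of a bare return */
        }

        return 0;

err_free:
        example_free(pdev);
        return err;
}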