rtase: Implement a function to receive packets
authorJustin Lai <justinlai0215@realtek.com>
Wed, 4 Sep 2024 03:21:08 +0000 (11:21 +0800)
committerJakub Kicinski <kuba@kernel.org>
Fri, 6 Sep 2024 05:02:38 +0000 (22:02 -0700)
Implement rx_handler to read the rx descriptor, check the received
packet against the descriptor's status bits, and store the packet in
a socket buffer to complete reception.
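
A minimal sketch (illustrative only, not part of this patch) of how a
NAPI poll routine could drive rx_handler; the poll function name and
the per-vector ring list members used here are assumptions:

    static int rtase_poll_sketch(struct napi_struct *napi, int budget)
    {
            struct rtase_int_vector *ivec;
            struct rtase_ring *ring;
            int workdone = 0;

            ivec = container_of(napi, struct rtase_int_vector, napi);

            /* Give each rx ring on this vector the remaining budget so
             * the total never exceeds it. "ring_list" and "ring_entry"
             * are assumed member names.
             */
            list_for_each_entry(ring, &ivec->ring_list, ring_entry)
                    workdone += rx_handler(ring, budget - workdone);

            if (workdone < budget && napi_complete_done(napi, workdone)) {
                    /* re-enable this vector's rx interrupt here */
            }

            return workdone;
    }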

Signed-off-by: Justin Lai <justinlai0215@realtek.com>
Link: https://patch.msgid.link/20240904032114.247117-8-justinlai0215@realtek.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/realtek/rtase/rtase_main.c

index bef5a94..361e9f2 100644
@@ -436,6 +436,150 @@ static void rtase_rx_ring_clear(struct page_pool *page_pool,
        }
 }
 
+static int rtase_fragmented_frame(u32 status)
+{
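+       /* A complete frame carries both RTASE_RX_FIRST_FRAG and
+        * RTASE_RX_LAST_FRAG; anything else arrived split across
+        * multiple descriptors.
+        */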
+       return (status & (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG)) !=
+              (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG);
+}
+
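+/* Set skb->ip_summed from the hardware checksum result: trust the
+ * NIC only when it parsed IPv4 (with a valid IP header checksum) or
+ * IPv6 and the TCP/UDP checksum check passed.
+ */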
+static void rtase_rx_csum(const struct rtase_private *tp, struct sk_buff *skb,
+                         const union rtase_rx_desc *desc)
+{
+       u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
+
+       /* rx csum offload */
+       if (((opts2 & RTASE_RX_V4F) && !(opts2 & RTASE_RX_IPF)) ||
+           (opts2 & RTASE_RX_V6F)) {
+               if (((opts2 & RTASE_RX_TCPT) && !(opts2 & RTASE_RX_TCPF)) ||
+                   ((opts2 & RTASE_RX_UDPT) && !(opts2 & RTASE_RX_UDPF)))
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               else
+                       skb->ip_summed = CHECKSUM_NONE;
+       } else {
+               skb->ip_summed = CHECKSUM_NONE;
+       }
+}
+
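+/* If the descriptor carries a VLAN tag, attach it to the skb. The tag
+ * sits byte-swapped within the little-endian opts2 word, hence the
+ * swab16().
+ */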
+static void rtase_rx_vlan_skb(union rtase_rx_desc *desc, struct sk_buff *skb)
+{
+       u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
+
+       if (!(opts2 & RTASE_RX_VLAN_TAG))
+               return;
+
+       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                              swab16(opts2 & RTASE_VLAN_TAG_MASK));
+}
+
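+/* Hand a completed skb to the stack through GRO on the ring's
+ * interrupt vector.
+ */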
+static void rtase_rx_skb(const struct rtase_ring *ring, struct sk_buff *skb)
+{
+       struct rtase_int_vector *ivec = ring->ivec;
+
+       napi_gro_receive(&ivec->napi, skb);
+}
+
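+/* Process up to @budget completed rx descriptors on @ring: validate
+ * each frame, build an skb around its page pool buffer and pass it up
+ * the stack. Returns the number of descriptors handled.
+ */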
+static int rx_handler(struct rtase_ring *ring, int budget)
+{
+       union rtase_rx_desc *desc_base = ring->desc;
+       u32 pkt_size, cur_rx, delta, entry, status;
+       struct rtase_private *tp = ring->ivec->tp;
+       struct net_device *dev = tp->dev;
+       union rtase_rx_desc *desc;
+       struct sk_buff *skb;
+       int workdone = 0;
+
+       cur_rx = ring->cur_idx;
+       entry = cur_rx % RTASE_NUM_DESC;
+       desc = &desc_base[entry];
+
+       while (workdone < budget) {
+               status = le32_to_cpu(desc->desc_status.opts1);
+
+               if (status & RTASE_DESC_OWN)
+                       break;
+
+               /* This barrier is needed to keep us from reading
+                * any other fields out of the rx descriptor until
+                * we know the status of RTASE_DESC_OWN
+                */
+               dma_rmb();
+
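+               /* Error descriptor: account the error and, unless
+                * NETIF_F_RXALL asks us to deliver bad frames anyway,
+                * hand the buffer straight back to the hardware.
+                */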
+               if (unlikely(status & RTASE_RX_RES)) {
+                       if (net_ratelimit())
+                               netdev_warn(dev, "Rx ERROR. status = %08x\n",
+                                           status);
+
+                       tp->stats.rx_errors++;
+
+                       if (status & (RTASE_RX_RWT | RTASE_RX_RUNT))
+                               tp->stats.rx_length_errors++;
+
+                       if (status & RTASE_RX_CRC)
+                               tp->stats.rx_crc_errors++;
+
+                       if (dev->features & NETIF_F_RXALL)
+                               goto process_pkt;
+
+                       rtase_mark_to_asic(desc, tp->rx_buf_sz);
+                       goto skip_process_pkt;
+               }
+
+process_pkt:
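+               /* The hardware length includes the 4-byte FCS; strip it
+                * unless the user requested it via NETIF_F_RXFCS.
+                */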
+               pkt_size = status & RTASE_RX_PKT_SIZE_MASK;
+               if (likely(!(dev->features & NETIF_F_RXFCS)))
+                       pkt_size -= ETH_FCS_LEN;
+
+               /* The driver does not support incoming fragmented frames.
+                * They are seen as a symptom of over-MTU-sized frames.
+                */
+               if (unlikely(rtase_fragmented_frame(status))) {
+                       tp->stats.rx_dropped++;
+                       tp->stats.rx_length_errors++;
+                       rtase_mark_to_asic(desc, tp->rx_buf_sz);
+                       goto skip_process_pkt;
+               }
+
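+               /* Make the device-written data visible to the CPU
+                * before touching it.
+                */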
+               dma_sync_single_for_cpu(&tp->pdev->dev,
+                                       ring->mis.data_phy_addr[entry],
+                                       tp->rx_buf_sz, DMA_FROM_DEVICE);
+
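+               /* Build the skb directly around the page pool buffer to
+                * avoid a copy; on failure, return the descriptor to
+                * the hardware and count the drop.
+                */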
+               skb = build_skb(ring->data_buf[entry], PAGE_SIZE);
+               if (!skb) {
+                       tp->stats.rx_dropped++;
+                       rtase_mark_to_asic(desc, tp->rx_buf_sz);
+                       goto skip_process_pkt;
+               }
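+               /* The buffer now belongs to the skb; clear our pointer
+                * so the refill path attaches a fresh page to this
+                * slot.
+                */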
+               ring->data_buf[entry] = NULL;
+
+               if (dev->features & NETIF_F_RXCSUM)
+                       rtase_rx_csum(tp, skb, desc);
+
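+               /* skb_mark_for_recycle() lets the page pool reclaim the
+                * buffer once the skb is freed.
+                */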
+               skb_put(skb, pkt_size);
+               skb_mark_for_recycle(skb);
+               skb->protocol = eth_type_trans(skb, dev);
+
+               if (skb->pkt_type == PACKET_MULTICAST)
+                       tp->stats.multicast++;
+
+               rtase_rx_vlan_skb(desc, skb);
+               rtase_rx_skb(ring, skb);
+
+               dev_sw_netstats_rx_add(dev, pkt_size);
+
+skip_process_pkt:
+               workdone++;
+               cur_rx++;
+               entry = cur_rx % RTASE_NUM_DESC;
+               desc = ring->desc + sizeof(union rtase_rx_desc) * entry;
+       }
+
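+       /* Return the descriptors consumed above to the hardware so it
+        * can keep receiving.
+        */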
+       ring->cur_idx = cur_rx;
+       delta = rtase_rx_ring_fill(ring, ring->dirty_idx, ring->cur_idx);
+       ring->dirty_idx += delta;
+
+       return workdone;
+}
+
 static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
 {
        struct rtase_ring *ring = &tp->rx_ring[idx];