        tp->ucopy.memory = 0;
 }
 
+#ifdef CONFIG_NET_DMA
+static void tcp_service_net_dma(struct sock *sk, bool wait)
+{
+       dma_cookie_t done, used;
+       dma_cookie_t last_issued;
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (!tp->ucopy.dma_chan)
+               return;
+
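+       /* Snapshot the most recently issued cookie and kick the channel
+        * so any copies still sitting in the pending queue get started.
+        */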
+       last_issued = tp->ucopy.dma_cookie;
+       dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
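+       /* If the last issued cookie has completed, every early-copied skb
+        * is safe to free at once; otherwise reap only those skbs whose
+        * cookies have completed, and keep polling while @wait is set.
+        */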
+       do {
+               if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+                                             last_issued, &done,
+                                             &used) == DMA_SUCCESS) {
+                       /* Safe to free early-copied skbs now */
+                       __skb_queue_purge(&sk->sk_async_wait_queue);
+                       break;
+               } else {
+                       struct sk_buff *skb;
+                       while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
+                              (dma_async_is_complete(skb->dma_cookie, done,
+                                                     used) == DMA_SUCCESS)) {
+                               __skb_dequeue(&sk->sk_async_wait_queue);
+                               kfree_skb(skb);
+                       }
+               }
+       } while (wait);
+}
+#endif
+
 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
        struct sk_buff *skb;
                        /* __ Set realtime policy in scheduler __ */
                }
 
+#ifdef CONFIG_NET_DMA
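+               /* Flush queued copies to the DMA engine before we
+                * potentially sleep below, so the copy engine makes
+                * progress while we wait for data.
+                */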
+               if (tp->ucopy.dma_chan)
+                       dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+#endif
                if (copied >= target) {
                        /* Do not sleep, just process backlog. */
                        release_sock(sk);
                        lock_sock(sk);
                } else
                        sk_wait_data(sk, &timeo);
 
 #ifdef CONFIG_NET_DMA
+               tcp_service_net_dma(sk, false);  /* Don't block */
                tp->ucopy.wakeup = 0;
 #endif
 
                                                copied = -EFAULT;
                                        break;
                                }
+
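+                               /* Start this early copy immediately instead
+                                * of batching issue until the loop exits.
+                                */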
+                               dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
                                if ((offset + used) == skb->len)
                                        copied_early = 1;
 
        }
 
 #ifdef CONFIG_NET_DMA
-       if (tp->ucopy.dma_chan) {
-               dma_cookie_t done, used;
-
-               dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
-
-               while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
-                                                tp->ucopy.dma_cookie, &done,
-                                                &used) == DMA_IN_PROGRESS) {
-                       /* do partial cleanup of sk_async_wait_queue */
-                       while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
-                              (dma_async_is_complete(skb->dma_cookie, done,
-                                                     used) == DMA_SUCCESS)) {
-                               __skb_dequeue(&sk->sk_async_wait_queue);
-                               kfree_skb(skb);
-                       }
-               }
+       tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
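+       /* The async wait queue is drained; the channel is no longer needed */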
+       tp->ucopy.dma_chan = NULL;
 
-               /* Safe to free early-copied skbs now */
-               __skb_queue_purge(&sk->sk_async_wait_queue);
-               tp->ucopy.dma_chan = NULL;
-       }
        if (tp->ucopy.pinned_list) {
                dma_unpin_iovec_pages(tp->ucopy.pinned_list);
                tp->ucopy.pinned_list = NULL;