Merge tag 'hyperv-next-signed-20220114' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5086cd0..afa81a9 100644
@@ -153,8 +153,21 @@ static void free_netvsc_device(struct rcu_head *head)
        int i;
 
        kfree(nvdev->extension);
-       vfree(nvdev->recv_buf);
-       vfree(nvdev->send_buf);
+
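+       /* If the buffer was remapped for an Isolation VM, undo the
+        * extra mapping and free the original allocation.
+        */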
+       if (nvdev->recv_original_buf) {
+               hv_unmap_memory(nvdev->recv_buf);
+               vfree(nvdev->recv_original_buf);
+       } else {
+               vfree(nvdev->recv_buf);
+       }
+
+       if (nvdev->send_original_buf) {
+               hv_unmap_memory(nvdev->send_buf);
+               vfree(nvdev->send_original_buf);
+       } else {
+               vfree(nvdev->send_buf);
+       }
+
        bitmap_free(nvdev->send_section_map);
 
        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
@@ -337,6 +350,7 @@ static int netvsc_init_buf(struct hv_device *device,
        struct nvsp_message *init_packet;
        unsigned int buf_size;
        int i, ret = 0;
+       void *vaddr;
 
        /* Get receive buffer area. */
        buf_size = device_info->recv_sections * device_info->recv_section_size;
@@ -372,6 +386,17 @@ static int netvsc_init_buf(struct hv_device *device,
                goto cleanup;
        }
 
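+       /* In an SNP Isolation VM the receive buffer must be accessed
+        * through the extra address space set up by hv_map_memory();
+        * keep the original buffer so it can be freed on teardown.
+        */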
+       if (hv_isolation_type_snp()) {
+               vaddr = hv_map_memory(net_device->recv_buf, buf_size);
+               if (!vaddr) {
+                       ret = -ENOMEM;
+                       goto cleanup;
+               }
+
+               net_device->recv_original_buf = net_device->recv_buf;
+               net_device->recv_buf = vaddr;
+       }
+
        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;
        memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -475,6 +500,17 @@ static int netvsc_init_buf(struct hv_device *device,
                goto cleanup;
        }
 
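+       /* Likewise, remap the send buffer when running in an SNP
+        * Isolation VM.
+        */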
+       if (hv_isolation_type_snp()) {
+               vaddr = hv_map_memory(net_device->send_buf, buf_size);
+               if (!vaddr) {
+                       ret = -ENOMEM;
+                       goto cleanup;
+               }
+
+               net_device->send_original_buf = net_device->send_buf;
+               net_device->send_buf = vaddr;
+       }
+
        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;
        memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -764,7 +800,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
 
        /* Notify the layer above us */
        if (likely(skb)) {
-               const struct hv_netvsc_packet *packet
+               struct hv_netvsc_packet *packet
                        = (struct hv_netvsc_packet *)skb->cb;
                u32 send_index = packet->send_buf_index;
                struct netvsc_stats *tx_stats;
@@ -780,6 +816,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
                tx_stats->bytes += packet->total_bytes;
                u64_stats_update_end(&tx_stats->syncp);
 
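+               /* Release the bounce-buffer mappings created by
+                * netvsc_dma_map() before the skb is consumed.
+                */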
+               netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
                napi_consume_skb(skb, budget);
        }
 
@@ -944,6 +981,88 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
                memset(dest, 0, padding);
 }
 
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+                     struct hv_netvsc_packet *packet)
+{
+       u32 page_count = packet->cp_partial ?
+               packet->page_buf_cnt - packet->rmsg_pgcnt :
+               packet->page_buf_cnt;
+       int i;
+
+       if (!hv_is_isolation_supported())
+               return;
+
+       if (!packet->dma_range)
+               return;
+
+       for (i = 0; i < page_count; i++)
+               dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
+                                packet->dma_range[i].mapping_size,
+                                DMA_TO_DEVICE);
+
+       kfree(packet->dma_range);
+}
+
+/* netvsc_dma_map - Map the data pages of a packet sent by
+ * vmbus_sendpacket_pagebuffer() through swiotlb bounce buffers in an
+ * Isolation VM.
+ *
+ * In an Isolation VM, the netvsc send buffer has been marked visible
+ * to the host, so data copied into the send buffer does not need to go
+ * through a bounce buffer. But the data pages handled by
+ * vmbus_sendpacket_pagebuffer() may not have been copied into the send
+ * buffer, so those pages must be mapped through swiotlb bounce buffers,
+ * which is what netvsc_dma_map() does. The pfns in the struct
+ * hv_page_buffer entries must be converted to the bounce buffer's pfns.
+ * The loop here is necessary because the entries in the page buffer
+ * array are not necessarily full pages of data. Each entry has its own
+ * offset and len, which may be non-zero even for entries in the middle
+ * of the array, and the entries are not physically contiguous. So each
+ * entry must be mapped individually rather than as one contiguous unit,
+ * which is why dma_map_sg() cannot be used here.
+ */
+static int netvsc_dma_map(struct hv_device *hv_dev,
+                         struct hv_netvsc_packet *packet,
+                         struct hv_page_buffer *pb)
+{
+       u32 page_count = packet->cp_partial ?
+               packet->page_buf_cnt - packet->rmsg_pgcnt :
+               packet->page_buf_cnt;
+       dma_addr_t dma;
+       int i;
+
+       if (!hv_is_isolation_supported())
+               return 0;
+
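+       /* Record one dma mapping per page buffer entry so that
+        * netvsc_dma_unmap() can tear them all down later.
+        */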
+       packet->dma_range = kcalloc(page_count,
+                                   sizeof(*packet->dma_range),
+                                   GFP_KERNEL);
+       if (!packet->dma_range)
+               return -ENOMEM;
+
+       for (i = 0; i < page_count; i++) {
+               char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
+                                        + pb[i].offset);
+               u32 len = pb[i].len;
+
+               dma = dma_map_single(&hv_dev->device, src, len,
+                                    DMA_TO_DEVICE);
+               if (dma_mapping_error(&hv_dev->device, dma)) {
+                       /* Unmap the entries already mapped before
+                        * bailing out.
+                        */
+                       while (i--)
+                               dma_unmap_single(&hv_dev->device,
+                                                packet->dma_range[i].dma,
+                                                packet->dma_range[i].mapping_size,
+                                                DMA_TO_DEVICE);
+                       kfree(packet->dma_range);
+                       packet->dma_range = NULL;
+                       return -ENOMEM;
+               }
+
+               /* pb[].offset and pb[].len are not changed by the dma
+                * mapping and so are not reassigned here.
+                */
+               packet->dma_range[i].dma = dma;
+               packet->dma_range[i].mapping_size = len;
+               pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
+       }
+
+       return 0;
+}
+
 static inline int netvsc_send_pkt(
        struct hv_device *device,
        struct hv_netvsc_packet *packet,
@@ -984,14 +1103,24 @@ static inline int netvsc_send_pkt(
 
        trace_nvsp_send_pkt(ndev, out_channel, rpkt);
 
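+       /* Initialize so netvsc_dma_unmap() is safe to call even if
+        * no mapping is set up below.
+        */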
+       packet->dma_range = NULL;
        if (packet->page_buf_cnt) {
                if (packet->cp_partial)
                        pb += packet->rmsg_pgcnt;
 
+               ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
+               if (ret) {
+                       ret = -EAGAIN;
+                       goto exit;
+               }
+
                ret = vmbus_sendpacket_pagebuffer(out_channel,
                                                  pb, packet->page_buf_cnt,
                                                  &nvmsg, sizeof(nvmsg),
                                                  req_id);
+
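+               /* Undo the bounce-buffer mappings if the send failed;
+                * otherwise they are released on send completion.
+                */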
+               if (ret)
+                       netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
        } else {
                ret = vmbus_sendpacket(out_channel,
                                       &nvmsg, sizeof(nvmsg),
@@ -999,6 +1128,7 @@ static inline int netvsc_send_pkt(
                                       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        }
 
+exit:
        if (ret == 0) {
                atomic_inc_return(&nvchan->queue_sends);