/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>
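
/* Check whether the frontend has posted enough rx ring slots to hold the
 * skb at the head of the rx queue.  If not, request an event when more
 * requests arrive and re-check so that a wakeup is not missed.
 */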
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
        RING_IDX prod, cons;
        struct sk_buff *skb;
        int needed;
        unsigned long flags;

        spin_lock_irqsave(&queue->rx_queue.lock, flags);

        skb = skb_peek(&queue->rx_queue);
        if (!skb) {
                spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
                return false;
        }

        needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
        if (skb_is_gso(skb))
                needed++;
        if (skb->sw_hash)
                needed++;

        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);

        do {
                prod = queue->rx.sring->req_prod;
                cons = queue->rx.req_cons;

                if (prod - cons >= needed)
                        return true;

                queue->rx.sring->req_event = prod + 1;

                /* Make sure event is visible before we check prod
                 * again.
                 */
                mb();
        } while (queue->rx.sring->req_prod != prod);

        return false;
}
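
/* Queue an skb for transmission to the frontend, accounting its length
 * against the queue limit and stopping the corresponding netdev tx queue
 * once the limit is exceeded.
 */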
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->rx_queue.lock, flags);

        __skb_queue_tail(&queue->rx_queue, skb);

        queue->rx_queue_len += skb->len;
        if (queue->rx_queue_len > queue->rx_queue_max) {
                struct net_device *dev = queue->vif->dev;

                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
        }

        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}
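
/* Remove the skb at the head of the rx queue, updating the queued-byte
 * count and waking the netdev tx queue again once it drops back below
 * the limit.
 */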
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        spin_lock_irq(&queue->rx_queue.lock);

        skb = __skb_dequeue(&queue->rx_queue);
        if (skb) {
                queue->rx_queue_len -= skb->len;
                if (queue->rx_queue_len < queue->rx_queue_max) {
                        struct netdev_queue *txq;

                        txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
                        netif_tx_wake_queue(txq);
                }
        }

        spin_unlock_irq(&queue->rx_queue.lock);

        return skb;
}
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        while ((skb = xenvif_rx_dequeue(queue)) != NULL)
                kfree_skb(skb);
}
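
/* Drop skbs from the head of the rx queue whose deadline (set when they
 * were queued) has already passed.
 */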
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        for (;;) {
                skb = skb_peek(&queue->rx_queue);
                if (!skb)
                        break;
                if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
                        break;
                xenvif_rx_dequeue(queue);
                kfree_skb(skb);
                queue->vif->dev->stats.rx_dropped++;
        }
}
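
/* Issue the batched grant-copy operations, propagate any per-op failure
 * into the matching rx response, push the responses to the frontend and
 * notify it, then free the skbs whose data has now been copied.
 */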
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
        unsigned int i;
        int notify;

        gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

        for (i = 0; i < queue->rx_copy.num; i++) {
                struct gnttab_copy *op;

                op = &queue->rx_copy.op[i];

                /* If the copy failed, overwrite the status field in
                 * the corresponding response.
                 */
                if (unlikely(op->status != GNTST_okay)) {
                        struct xen_netif_rx_response *rsp;

                        rsp = RING_GET_RESPONSE(&queue->rx,
                                                queue->rx_copy.idx[i]);
                        rsp->status = op->status;
                }
        }

        queue->rx_copy.num = 0;

        /* Push responses for all completed packets. */
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
        if (notify)
                notify_remote_via_irq(queue->rx_irq);

        __skb_queue_purge(queue->rx_copy.completed);
}
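
/* Add one grant-copy operation for a chunk of skb data, flushing the
 * batch first if it is already full.  The source may be a local page or
 * a foreign (grant-mapped) page; the destination is the grant supplied
 * in the frontend's rx request.
 */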
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
                               struct xen_netif_rx_request *req,
                               unsigned int offset, void *data, size_t len)
{
        struct gnttab_copy *op;
        struct page *page;
        struct xen_page_foreign *foreign;

        if (queue->rx_copy.num == COPY_BATCH_SIZE)
                xenvif_rx_copy_flush(queue);

        op = &queue->rx_copy.op[queue->rx_copy.num];

        page = virt_to_page(data);

        op->flags = GNTCOPY_dest_gref;

        foreign = xen_page_foreign(page);
        if (foreign) {
                op->source.domid = foreign->domid;
                op->source.u.ref = foreign->gref;
                op->flags |= GNTCOPY_source_gref;
        } else {
                op->source.u.gmfn = virt_to_gfn(data);
                op->source.domid = DOMID_SELF;
        }

        op->source.offset = xen_offset_in_page(data);
        op->dest.u.ref = req->gref;
        op->dest.domid = queue->vif->domid;
        op->dest.offset = offset;
        op->len = len;

        queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
        queue->rx_copy.num++;
}
static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
        if (skb_is_gso(skb)) {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        return XEN_NETIF_GSO_TYPE_TCPV4;
                else
                        return XEN_NETIF_GSO_TYPE_TCPV6;
        }
        return XEN_NETIF_GSO_TYPE_NONE;
}
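
/* State tracked while turning one skb into a sequence of rx ring slots. */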
struct xenvif_pkt_state {
        struct sk_buff *skb;
        size_t remaining_len;
        struct sk_buff *frag_iter;
        int frag; /* frag == -1 => frag_iter->head */
        unsigned int frag_offset;
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
        unsigned int extra_count;
        unsigned int slot;
};
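
/* Dequeue the next skb, account it in the stats, reset the packet state
 * and prepare any extra_info segments (GSO, XDP headroom, flow hash)
 * that will accompany its data.
 */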
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
        struct sk_buff *skb;
        unsigned int gso_type;

        skb = xenvif_rx_dequeue(queue);

        queue->stats.tx_bytes += skb->len;
        queue->stats.tx_packets++;

        /* Reset packet state. */
        memset(pkt, 0, sizeof(struct xenvif_pkt_state));

        pkt->skb = skb;
        pkt->frag_iter = skb;
        pkt->remaining_len = skb->len;
        pkt->frag = -1;

        gso_type = xenvif_gso_type(skb);
        if ((1 << gso_type) & queue->vif->gso_mask) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                extra->u.gso.type = gso_type;
                extra->u.gso.size = skb_shinfo(skb)->gso_size;
                extra->u.gso.pad = 0;
                extra->u.gso.features = 0;
                extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
                extra->flags = 0;

                pkt->extra_count++;
        }

        if (queue->vif->xdp_headroom) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

                memset(extra, 0, sizeof(struct xen_netif_extra_info));
                extra->u.xdp.headroom = queue->vif->xdp_headroom;
                extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
                extra->flags = 0;

                pkt->extra_count++;
        }

        if (skb->sw_hash) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

                extra->u.hash.algorithm =
                        XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

                if (skb->l4_hash)
                        extra->u.hash.type =
                                skb->protocol == htons(ETH_P_IP) ?
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
                else
                        extra->u.hash.type =
                                skb->protocol == htons(ETH_P_IP) ?
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV6;

                *(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

                extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
                extra->flags = 0;

                pkt->extra_count++;
        }
}
static void xenvif_rx_complete(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
        /* All responses are ready to be pushed. */
        queue->rx.rsp_prod_pvt = queue->rx.req_cons;

        __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}
static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
        struct sk_buff *frag_iter = pkt->frag_iter;
        unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

        pkt->frag++;
        pkt->frag_offset = 0;

        if (pkt->frag >= nr_frags) {
                if (frag_iter == pkt->skb)
                        pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
                else
                        pkt->frag_iter = frag_iter->next;

                pkt->frag = -1;
        }
}
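
/* Return the largest chunk of the current fragment that fits in the
 * remaining space of the current ring slot without crossing a Xen page
 * boundary in the source data.
 */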
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 unsigned int offset, void **data,
                                 size_t *len)
{
        struct sk_buff *frag_iter = pkt->frag_iter;
        void *frag_data;
        size_t frag_len, chunk_len;

        BUG_ON(!frag_iter);

        if (pkt->frag == -1) {
                frag_data = frag_iter->data;
                frag_len = skb_headlen(frag_iter);
        } else {
                skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

                frag_data = skb_frag_address(frag);
                frag_len = skb_frag_size(frag);
        }

        frag_data += pkt->frag_offset;
        frag_len -= pkt->frag_offset;

        chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
        chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
                                             xen_offset_in_page(frag_data));

        pkt->frag_offset += chunk_len;

        /* Advance to next frag? */
        if (frag_len == chunk_len)
                xenvif_rx_next_frag(pkt);

        *data = frag_data;
        *len = chunk_len;
}
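
/* Fill one data slot: copy chunks into the granted page until it is full
 * or the packet is exhausted, then build the matching response (status
 * carries the number of bytes placed in the slot).
 */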
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
                                struct xenvif_pkt_state *pkt,
                                struct xen_netif_rx_request *req,
                                struct xen_netif_rx_response *rsp)
{
        unsigned int offset = queue->vif->xdp_headroom;
        unsigned int flags;

        do {
                size_t len;
                void *data;

                xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
                xenvif_rx_copy_add(queue, req, offset, data, len);

                offset += len;
                pkt->remaining_len -= len;

        } while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

        if (pkt->remaining_len > 0)
                flags = XEN_NETRXF_more_data;
        else
                flags = 0;

        if (pkt->slot == 0) {
                struct sk_buff *skb = pkt->skb;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        flags |= XEN_NETRXF_csum_blank |
                                 XEN_NETRXF_data_validated;
                else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        flags |= XEN_NETRXF_data_validated;

                if (pkt->extra_count != 0)
                        flags |= XEN_NETRXF_extra_info;
        }

        rsp->offset = 0;
        rsp->flags = flags;
        rsp->id = req->id;
        rsp->status = (s16)offset;
}
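
/* Consume one pending extra_info segment and write it into the slot that
 * would otherwise hold a response, flagging whether more extras follow.
 */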
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 struct xen_netif_rx_request *req,
                                 struct xen_netif_rx_response *rsp)
{
        struct xen_netif_extra_info *extra = (void *)rsp;
        unsigned int i;

        pkt->extra_count--;

        for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
                if (pkt->extras[i].type) {
                        *extra = pkt->extras[i];

                        if (pkt->extra_count != 0)
                                extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

                        pkt->extras[i].type = 0;
                        return;
                }
        }

        BUG();
}
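
/* Transmit one queued skb to the frontend, consuming one request/response
 * pair per slot until the whole packet and all extras have been emitted.
 * Extras are placed immediately after the first data slot.
 */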
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
        struct xenvif_pkt_state pkt;

        xenvif_rx_next_skb(queue, &pkt);

        queue->last_rx_time = jiffies;

        do {
                struct xen_netif_rx_request *req;
                struct xen_netif_rx_response *rsp;

                req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
                rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

                /* Extras must go after the first data slot */
                if (pkt.slot != 0 && pkt.extra_count != 0)
                        xenvif_rx_extra_slot(queue, &pkt, req, rsp);
                else
                        xenvif_rx_data_slot(queue, &pkt, req, rsp);

                queue->rx.req_cons++;
                pkt.slot++;
        } while (pkt.remaining_len > 0 || pkt.extra_count != 0);

        xenvif_rx_complete(queue, &pkt);
}
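
/* Process queued skbs in batches of at most RX_BATCH_SIZE per invocation,
 * as long as the frontend keeps supplying enough ring slots, then flush
 * any pending grant copies.
 */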
#define RX_BATCH_SIZE 64

void xenvif_rx_action(struct xenvif_queue *queue)
{
        struct sk_buff_head completed_skbs;
        unsigned int work_done = 0;

        __skb_queue_head_init(&completed_skbs);
        queue->rx_copy.completed = &completed_skbs;

        while (xenvif_rx_ring_slots_available(queue) &&
               work_done < RX_BATCH_SIZE) {
                xenvif_rx_skb(queue);
                work_done++;
        }

        /* Flush any pending copies and complete all skbs. */
        xenvif_rx_copy_flush(queue);
}
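
/* A queue is considered stalled if the frontend has no outstanding rx
 * requests and none have arrived for longer than the configured stall
 * timeout.
 */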
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
        RING_IDX prod, cons;

        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;

        return !queue->stalled &&
                prod - cons < 1 &&
                time_after(jiffies,
                           queue->last_rx_time + queue->vif->stall_timeout);
}
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
        RING_IDX prod, cons;

        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;

        return queue->stalled && prod - cons >= 1;
}
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
        return xenvif_rx_ring_slots_available(queue) ||
                (queue->vif->stall_timeout &&
                 (xenvif_rx_queue_stalled(queue) ||
                  xenvif_rx_queue_ready(queue))) ||
                (test_kthread && kthread_should_stop()) ||
                queue->vif->disabled;
}
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
        struct sk_buff *skb;
        long timeout;

        skb = skb_peek(&queue->rx_queue);
        if (!skb)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = XENVIF_RX_CB(skb)->expires - jiffies;
        return timeout < 0 ? 0 : timeout;
}
/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
        DEFINE_WAIT(wait);

        if (xenvif_have_rx_work(queue, true))
                return;

        for (;;) {
                long ret;

                prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
                if (xenvif_have_rx_work(queue, true))
                        break;
                if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
                                        &queue->eoi_pending) &
                    (NETBK_RX_EOI | NETBK_COMMON_EOI))
                        xen_irq_lateeoi(queue->rx_irq, 0);

                ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
                if (!ret)
                        break;
        }
        finish_wait(&queue->wq, &wait);
}
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->stalled = true;

        /* At least one queue has stalled? Disable the carrier. */
        spin_lock(&vif->lock);
        if (vif->stalled_queues++ == 0) {
                netdev_info(vif->dev, "Guest Rx stalled");
                netif_carrier_off(vif->dev);
        }
        spin_unlock(&vif->lock);
}
static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
        queue->stalled = false;

        /* All queues are ready? Enable the carrier. */
        spin_lock(&vif->lock);
        if (--vif->stalled_queues == 0) {
                netdev_info(vif->dev, "Guest Rx ready");
                netif_carrier_on(vif->dev);
        }
        spin_unlock(&vif->lock);
}
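
/* Per-queue kernel thread that transmits queued skbs to the frontend,
 * handling carrier state, rogue-frontend shutdown and expiry of packets
 * the guest never picked up.
 */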
int xenvif_kthread_guest_rx(void *data)
{
        struct xenvif_queue *queue = data;
        struct xenvif *vif = queue->vif;

        if (!vif->stall_timeout)
                xenvif_queue_carrier_on(queue);

        for (;;) {
                xenvif_wait_for_rx_work(queue);

                if (kthread_should_stop())
                        break;

                /* This frontend is found to be rogue, disable it in
                 * kthread context. Currently this is only set when
                 * netback finds out frontend sends malformed packet,
                 * but we cannot disable the interface in softirq
                 * context so we defer it here, if this thread is
                 * associated with queue 0.
                 */
                if (unlikely(vif->disabled && queue->id == 0)) {
                        xenvif_carrier_off(vif);
                        break;
                }

                if (!skb_queue_empty(&queue->rx_queue))
                        xenvif_rx_action(queue);

                /* If the guest hasn't provided any Rx slots for a
                 * while it's probably not responsive, drop the
                 * carrier so packets are dropped earlier.
                 */
                if (vif->stall_timeout) {
                        if (xenvif_rx_queue_stalled(queue))
                                xenvif_queue_carrier_off(queue);
                        else if (xenvif_rx_queue_ready(queue))
                                xenvif_queue_carrier_on(queue);
                }

                /* Queued packets may have foreign pages from other
                 * domains. These cannot be queued indefinitely as
                 * this would starve guests of grant refs and transmit
                 * slots.
                 */
                xenvif_rx_queue_drop_expired(queue);

                cond_resched();
        }

        /* Bin any remaining skbs */
        xenvif_rx_queue_purge(queue);

        return 0;
}