X-Git-Url: http://git.monstr.eu/?a=blobdiff_plain;f=drivers%2Fnet%2Fcan%2Frx-offload.c;h=62393b029e4e6ccce11853b796d0341faa907b73;hb=54dd0b8904ac4c70df7616d39b80390835fede80;hp=e6a668ee77306a1859872cf543d68ef59cf8667d;hpb=fdcec00405fae0befdd7bbcbe738b7325e5746fb;p=linux-2.6-microblaze.git

diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index e6a668ee7730..62393b029e4e 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014 David Jander, Protonic Holland
- * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+/* Copyright (c) 2014      Protonic Holland,
+ *                         David Jander
+ * Copyright (C) 2014-2017 Pengutronix,
+ *                         Marc Kleine-Budde <kernel@pengutronix.de>
  */
 
 #include <linux/can/dev.h>
@@ -11,14 +12,17 @@ struct can_rx_offload_cb {
 	u32 timestamp;
 };
 
-static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
+static inline struct can_rx_offload_cb *
+can_rx_offload_get_cb(struct sk_buff *skb)
 {
 	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
 
 	return (struct can_rx_offload_cb *)skb->cb;
 }
 
-static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
+static inline bool
+can_rx_offload_le(struct can_rx_offload *offload,
+		  unsigned int a, unsigned int b)
 {
 	if (offload->inc)
 		return a <= b;
@@ -26,7 +30,8 @@ static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned in
 		return a >= b;
 }
 
-static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
+static inline unsigned int
+can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
 {
 	if (offload->inc)
 		return (*val)++;
@@ -36,7 +41,9 @@ static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, un
 
 static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
 {
-	struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
+	struct can_rx_offload *offload = container_of(napi,
+						      struct can_rx_offload,
+						      napi);
 	struct net_device *dev = offload->dev;
 	struct net_device_stats *stats = &dev->stats;
 	struct sk_buff *skb;
@@ -65,8 +72,9 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
 	return work_done;
 }
 
-static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
-					int (*compare)(struct sk_buff *a, struct sk_buff *b))
+static inline void
+__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
+		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
 {
 	struct sk_buff *pos, *insert = NULL;
 
@@ -107,41 +115,100 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
 	return cb_b->timestamp - cb_a->timestamp;
 }
 
-static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
+/**
+ * can_rx_offload_offload_one() - Read one CAN frame from HW
+ * @offload: pointer to rx_offload context
+ * @n: number of mailbox to read
+ *
+ * The task of this function is to read a CAN frame from mailbox @n
+ * from the device and return the mailbox's content as a struct
+ * sk_buff.
+ *
+ * If the struct can_rx_offload::skb_queue exceeds the maximal queue
+ * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
+ * allocated, the mailbox contents is discarded by reading it into an
+ * overflow buffer. This way the mailbox is marked as free by the
+ * driver.
+ *
+ * Return: A pointer to skb containing the CAN frame on success.
+ *
+ *         NULL if the mailbox @n is empty.
+ *
+ *         ERR_PTR() in case of an error
+ */
+static struct sk_buff *
+can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb = NULL, *skb_error = NULL;
 	struct can_rx_offload_cb *cb;
 	struct can_frame *cf;
 	int ret;
 
-	/* If queue is full or skb not available, read to discard mailbox */
-	if (likely(skb_queue_len(&offload->skb_queue) <=
-		   offload->skb_queue_len_max))
+	if (likely(skb_queue_len(&offload->skb_queue) <
+		   offload->skb_queue_len_max)) {
 		skb = alloc_can_skb(offload->dev, &cf);
+		if (unlikely(!skb))
+			skb_error = ERR_PTR(-ENOMEM);	/* skb alloc failed */
+	} else {
+		skb_error = ERR_PTR(-ENOBUFS);		/* skb_queue is full */
+	}
 
-	if (!skb) {
+	/* If queue is full or skb not available, drop by reading into
+	 * overflow buffer.
+	 */
+	if (unlikely(skb_error)) {
 		struct can_frame cf_overflow;
 		u32 timestamp;
 
 		ret = offload->mailbox_read(offload, &cf_overflow,
 					    &timestamp, n);
-		if (ret)
-			offload->dev->stats.rx_dropped++;
 
-		return NULL;
+		/* Mailbox was empty. */
+		if (unlikely(!ret))
+			return NULL;
+
+		/* Mailbox has been read and we're dropping it or
+		 * there was a problem reading the mailbox.
+		 *
+		 * Increment error counters in any case.
+		 */
+		offload->dev->stats.rx_dropped++;
+		offload->dev->stats.rx_fifo_errors++;
+
+		/* There was a problem reading the mailbox, propagate
+		 * error value.
+		 */
+		if (unlikely(ret < 0))
+			return ERR_PTR(ret);
+
+		return skb_error;
 	}
 
 	cb = can_rx_offload_get_cb(skb);
 	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
-	if (!ret) {
+
+	/* Mailbox was empty. */
+	if (unlikely(!ret)) {
 		kfree_skb(skb);
 		return NULL;
 	}
 
+	/* There was a problem reading the mailbox, propagate error value. */
+	if (unlikely(ret < 0)) {
+		kfree_skb(skb);
+
+		offload->dev->stats.rx_dropped++;
+		offload->dev->stats.rx_fifo_errors++;
+
+		return ERR_PTR(ret);
+	}
+
+	/* Mailbox was read. */
 	return skb;
 }
 
-int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
+					 u64 pending)
 {
 	struct sk_buff_head skb_queue;
 	unsigned int i;
@@ -157,8 +224,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
 			continue;
 
 		skb = can_rx_offload_offload_one(offload, i);
-		if (!skb)
-			break;
+		if (IS_ERR_OR_NULL(skb))
+			continue;
 
 		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
 	}
@@ -188,7 +255,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
 	struct sk_buff *skb;
 	int received = 0;
 
-	while ((skb = can_rx_offload_offload_one(offload, 0))) {
+	while (1) {
+		skb = can_rx_offload_offload_one(offload, 0);
+		if (IS_ERR(skb))
+			continue;
+		if (!skb)
+			break;
+
 		skb_queue_tail(&offload->skb_queue, skb);
 		received++;
 	}
@@ -207,8 +280,10 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
 	unsigned long flags;
 
 	if (skb_queue_len(&offload->skb_queue) >
-	    offload->skb_queue_len_max)
-		return -ENOMEM;
+	    offload->skb_queue_len_max) {
+		kfree_skb(skb);
+		return -ENOBUFS;
+	}
 
 	cb = can_rx_offload_get_cb(skb);
 	cb->timestamp = timestamp;
@@ -250,8 +325,10 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
 			      struct sk_buff *skb)
 {
 	if (skb_queue_len(&offload->skb_queue) >
-	    offload->skb_queue_len_max)
-		return -ENOMEM;
+	    offload->skb_queue_len_max) {
+		kfree_skb(skb);
+		return -ENOBUFS;
+	}
 
 	skb_queue_tail(&offload->skb_queue, skb);
 	can_rx_offload_schedule(offload);
@@ -260,7 +337,9 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
 
-static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+static int can_rx_offload_init_queue(struct net_device *dev,
+				     struct can_rx_offload *offload,
+				     unsigned int weight)
 {
 	offload->dev = dev;
 
@@ -278,7 +357,8 @@ static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offlo
 	return 0;
 }
 
-int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
+int can_rx_offload_add_timestamp(struct net_device *dev,
+				 struct can_rx_offload *offload)
 {
 	unsigned int weight;
 
@@ -298,7 +378,8 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
 
-int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+int can_rx_offload_add_fifo(struct net_device *dev,
+			    struct can_rx_offload *offload, unsigned int weight)
 {
 	if (!offload->mailbox_read)
 		return -EINVAL;
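
The kerneldoc added above also pins down the contract between the rx-offload core and a driver's mailbox_read() callback: return 0 when mailbox @n is empty, a negative errno when reading the mailbox fails, and a positive value once the frame has been stored in *cf and the mailbox has been marked free. Below is a minimal sketch of how a driver might plug into this API. Everything prefixed foo_ (the priv struct, register helpers, register layout, the 16-mailbox range) is hypothetical, and the exact mailbox_read prototype should be checked against struct can_rx_offload in include/linux/can/rx-offload.h for the kernel version in use; only the can_rx_offload_*() calls and the return-value convention are taken from the code in this diff.

/*
 * Illustrative driver glue for the rx-offload API shown in the diff above.
 * Everything named "foo_*" is made up for illustration; only the
 * can_rx_offload_*() calls and the mailbox_read() return convention
 * (0 = mailbox empty, < 0 = error, > 0 = frame read) follow the diff.
 */
#include <linux/bits.h>
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct can_priv can;		/* must be first, see alloc_candev() */
	struct can_rx_offload offload;
	void __iomem *regs;
};

/* Hypothetical register accessor; a real driver has its own MMIO helpers. */
static u32 foo_read_reg(struct foo_priv *priv, unsigned int off)
{
	return readl(priv->regs + off);
}

/* Prototype must match struct can_rx_offload::mailbox_read for the kernel
 * version in use; check include/linux/can/rx-offload.h.
 */
static unsigned int foo_mailbox_read(struct can_rx_offload *offload,
				     struct can_frame *cf, u32 *timestamp,
				     unsigned int n)
{
	struct foo_priv *priv = container_of(offload, struct foo_priv, offload);
	u32 ctrl = foo_read_reg(priv, 0x80 + n * 0x10);	/* made-up layout */

	if (!(ctrl & BIT(31)))
		return 0;		/* mailbox @n is empty */

	*timestamp = foo_read_reg(priv, 0x84 + n * 0x10);
	cf->can_id = foo_read_reg(priv, 0x88 + n * 0x10);
	cf->can_dlc = get_can_dlc(ctrl & 0xf);
	*(u32 *)&cf->data[0] = foo_read_reg(priv, 0x8c + n * 0x10);
	*(u32 *)&cf->data[4] = foo_read_reg(priv, 0x90 + n * 0x10);

	return 1;			/* frame stored in *cf, mailbox free */
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *priv = netdev_priv(dev);
	u64 pending = foo_read_reg(priv, 0x20);	/* pending-mailbox bitmask */

	if (!pending)
		return IRQ_NONE;

	/* Reads all pending mailboxes via foo_mailbox_read(), sorts the
	 * frames by timestamp and queues them for the rx-offload NAPI poll.
	 * A real handler would also ack the interrupt here.
	 */
	can_rx_offload_irq_offload_timestamp(&priv->offload, pending);

	return IRQ_HANDLED;
}

static int foo_register_offload(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	priv->offload.mailbox_read = foo_mailbox_read;
	priv->offload.mb_first = 0;	/* assumed: 16 mailboxes, ascending */
	priv->offload.mb_last = 15;

	return can_rx_offload_add_timestamp(dev, &priv->offload);
}

For hardware with a single RX FIFO instead of timestamped mailboxes, the registration path would instead be can_rx_offload_add_fifo() from the last hunk, and the interrupt handler would call can_rx_offload_irq_offload_fifo(), which, as the hunk at -188 shows, keeps reading mailbox 0 until it reports empty.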