1 // SPDX-License-Identifier: GPL-2.0
3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
5 // Copyright (c) 2019, 2020, 2021 Pengutronix,
6 // Marc Kleine-Budde <kernel@pengutronix.de>
10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
15 #include <linux/bitfield.h>
16 #include <linux/clk.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
20 #include <linux/of_device.h>
21 #include <linux/pm_runtime.h>
23 #include <asm/unaligned.h>
25 #include "mcp251xfd.h"
27 #define DEVICE_NAME "mcp251xfd"
29 static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
30 .quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
31 MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
33 .model = MCP251XFD_MODEL_MCP2517FD,
36 static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
37 .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
38 MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
39 .model = MCP251XFD_MODEL_MCP2518FD,
42 /* Autodetect model, start with CRC enabled. */
43 static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
44 .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
45 MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
46 .model = MCP251XFD_MODEL_MCP251XFD,
49 static const struct can_bittiming_const mcp251xfd_bittiming_const = {
61 static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
73 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
76 case MCP251XFD_MODEL_MCP2517FD:
78 case MCP251XFD_MODEL_MCP2518FD:
80 case MCP251XFD_MODEL_MCP251XFD:
87 static inline const char *
88 mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
90 return __mcp251xfd_get_model_str(priv->devtype_data.model);
93 static const char *mcp251xfd_get_mode_str(const u8 mode)
96 case MCP251XFD_REG_CON_MODE_MIXED:
97 return "Mixed (CAN FD/CAN 2.0)";
98 case MCP251XFD_REG_CON_MODE_SLEEP:
100 case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
101 return "Internal Loopback";
102 case MCP251XFD_REG_CON_MODE_LISTENONLY:
103 return "Listen Only";
104 case MCP251XFD_REG_CON_MODE_CONFIG:
105 return "Configuration";
106 case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
107 return "External Loopback";
108 case MCP251XFD_REG_CON_MODE_CAN2_0:
110 case MCP251XFD_REG_CON_MODE_RESTRICTED:
111 return "Restricted Operation";
117 static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
122 return regulator_enable(priv->reg_vdd);
125 static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
130 return regulator_disable(priv->reg_vdd);
134 mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
136 if (!priv->reg_xceiver)
139 return regulator_enable(priv->reg_xceiver);
143 mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
145 if (!priv->reg_xceiver)
148 return regulator_disable(priv->reg_xceiver);
151 static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
155 err = clk_prepare_enable(priv->clk);
159 err = mcp251xfd_vdd_enable(priv);
161 clk_disable_unprepare(priv->clk);
163 /* Wait for oscillator stabilisation time after power up */
164 usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
165 2 * MCP251XFD_OSC_STAB_SLEEP_US);
170 static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
174 err = mcp251xfd_vdd_disable(priv);
178 clk_disable_unprepare(priv->clk);
/* Build a SPI WRITE command for the register bytes selected by @mask.
 * Only the touched bytes are transferred; controllers with the
 * CRC_REG quirk get a big endian CRC16 over command + data appended.
 * Returns the total number of bytes to transfer.
 *
 * NOTE(review): declarations, braces and the final return are not
 * visible in this excerpt -- verify against the full file.
 */
184 mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
185 union mcp251xfd_write_reg_buf *write_reg_buf,
186 const u16 reg, const u32 mask, const u32 val)
188 u8 first_byte, last_byte, len;
/* Narrow the access to the byte span actually covered by the mask. */
192 first_byte = mcp251xfd_first_byte_set(mask);
193 last_byte = mcp251xfd_last_byte_set(mask);
194 len = last_byte - first_byte + 1;
/* Value is shifted so its LSB lines up with the first written byte. */
196 data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
197 val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
198 memcpy(data, &val_le32, len);
200 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
203 mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
/* CRC is computed over command header + payload, then appended. */
206 len += sizeof(write_reg_buf->crc.cmd);
207 crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
208 put_unaligned_be16(crc, (void *)write_reg_buf + len);
211 len += sizeof(write_reg_buf->crc.crc);
213 len += sizeof(write_reg_buf->nocrc.cmd);
/* Read the TEF user address register and convert the byte address
 * into a TEF object index (the chip-side TEF tail).
 */
220 mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
226 err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
230 *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
/* Read the TX FIFO status register and extract the FIFO index (the
 * chip-side TX tail) from the FIFOCI field.
 */
236 mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
242 err = regmap_read(priv->map_reg,
243 MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
248 *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
/* Read the status register of this RX ring's FIFO and extract the
 * FIFO index (the chip-side RX head) from the FIFOCI field.
 */
254 mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
255 const struct mcp251xfd_rx_ring *ring,
261 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
266 *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
/* Read the FIFO user address register, translate the absolute RAM
 * address into an offset relative to this ring's base, and divide by
 * the object size to get the chip-side RX tail index.
 */
272 mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
273 const struct mcp251xfd_rx_ring *ring,
279 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
284 fifo_ua -= ring->base - MCP251XFD_RAM_START;
285 *rx_tail = fifo_ua / ring->obj_size;
/* Pre-initialize one TX object's SPI message: transfer 0 carries the
 * (CRC or non-CRC) write command with the TX object payload, transfer
 * 1 carries the pre-built "request to send" register write. The
 * payload length of transfer 0 is filled in per frame at xmit time.
 */
291 mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
292 const struct mcp251xfd_tx_ring *ring,
293 struct mcp251xfd_tx_obj *tx_obj,
294 const u8 rts_buf_len,
297 struct spi_transfer *xfer;
301 addr = mcp251xfd_get_tx_obj_addr(ring, n);
302 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
303 mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
306 mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
/* Transfer 0: the TX object itself. */
309 xfer = &tx_obj->xfer[0];
310 xfer->tx_buf = &tx_obj->buf;
311 xfer->len = 0; /* actual len is assigned on the fly */
313 xfer->cs_change_delay.value = 0;
314 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
316 /* FIFO request to send */
317 xfer = &tx_obj->xfer[1];
318 xfer->tx_buf = &ring->rts_buf;
319 xfer->len = rts_buf_len;
322 spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
323 ARRAY_SIZE(tx_obj->xfer));
/* Lay out TEF, TX and RX rings in controller RAM and pre-build all
 * per-ring SPI transfers: the TEF/RX "UINC" (tail increment) register
 * writes and the per-TX-object messages.
 *
 * NOTE(review): several lines (head/tail resets, some assignments and
 * closing braces) are not visible in this excerpt -- verify against
 * the full file.
 */
326 static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
328 struct mcp251xfd_tef_ring *tef_ring;
329 struct mcp251xfd_tx_ring *tx_ring;
330 struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
331 struct mcp251xfd_tx_obj *tx_obj;
332 struct spi_transfer *xfer;
/* Reset BQL state, the rings start out empty. */
338 netdev_reset_queue(priv->ndev);
341 tef_ring = priv->tef;
345 /* FIFO increment TEF tail pointer */
346 addr = MCP251XFD_REG_TEFCON;
347 val = MCP251XFD_REG_TEFCON_UINC;
348 len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
/* One identical UINC transfer per TEF object; handlers pick how many
 * to chain into a single SPI message.
 */
351 for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
352 xfer = &tef_ring->uinc_xfer[j];
353 xfer->tx_buf = &tef_ring->uinc_buf;
356 xfer->cs_change_delay.value = 0;
357 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
360 /* "cs_change == 1" on the last transfer results in an active
361 * chip select after the complete SPI message. This causes the
362 * controller to interpret the next register access as
363 * data. Set "cs_change" of the last transfer to "0" to
364 * properly deactivate the chip select at the end of the
/* TX ring lives in RAM directly behind the TEF objects. */
373 tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);
375 /* FIFO request to send */
376 addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
377 val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
378 len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
381 mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
382 mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
/* RX rings are placed consecutively after the TX ring. */
385 mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
389 rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);
393 mcp251xfd_get_tx_obj_addr(tx_ring,
396 rx_ring->base = prev_rx_ring->base +
397 prev_rx_ring->obj_size *
398 prev_rx_ring->obj_num;
400 prev_rx_ring = rx_ring;
402 /* FIFO increment RX tail pointer */
403 addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
404 val = MCP251XFD_REG_FIFOCON_UINC;
405 len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
408 for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
409 xfer = &rx_ring->uinc_xfer[j];
410 xfer->tx_buf = &rx_ring->uinc_buf;
413 xfer->cs_change_delay.value = 0;
414 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
417 /* "cs_change == 1" on the last transfer results in an
418 * active chip select after the complete SPI
419 * message. This causes the controller to interpret
420 * the next register access as data. Set "cs_change"
421 * of the last transfer to "0" to properly deactivate
422 * the chip select at the end of the message.
/* Free all allocated RX rings, iterating backwards so partially
 * populated arrays (from a failed mcp251xfd_ring_alloc()) are
 * handled, too.
 */
428 static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
432 for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
/* Size the TEF/TX/RX rings according to the configured CAN mode (FD
 * and listen-only use the larger CAN-FD object layout) and allocate
 * RX rings until the controller RAM budget is exhausted.
 */
438 static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
440 struct mcp251xfd_tx_ring *tx_ring;
441 struct mcp251xfd_rx_ring *rx_ring;
442 int tef_obj_size, tx_obj_size, rx_obj_size;
446 tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
447 /* listen-only mode works like FD mode */
448 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
449 tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
450 tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
451 rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
453 tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
454 tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
455 rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
459 tx_ring->obj_num = tx_obj_num;
460 tx_ring->obj_size = tx_obj_size;
/* Each TX object also consumes one TEF object in RAM. */
462 ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
463 (tef_obj_size + tx_obj_size);
466 i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
/* Round the RX object count down to a power of two, capped at the
 * chip maximum.
 */
470 rx_obj_num = ram_free / rx_obj_size;
471 rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
472 MCP251XFD_RX_OBJ_NUM_MAX);
474 rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
/* Allocation failed: release rings allocated so far. */
477 mcp251xfd_ring_free(priv);
480 rx_ring->obj_num = rx_obj_num;
481 rx_ring->obj_size = rx_obj_size;
482 priv->rx[i] = rx_ring;
484 ram_free -= rx_ring->obj_num * rx_ring->obj_size;
486 priv->rx_ring_num = i;
488 netdev_dbg(priv->ndev,
489 "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
490 tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
491 tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
493 mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
494 netdev_dbg(priv->ndev,
495 "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
496 i, rx_ring->obj_num, rx_ring->obj_size,
497 rx_ring->obj_size * rx_ring->obj_num);
500 netdev_dbg(priv->ndev,
501 "FIFO setup: free: %d bytes\n",
/* Read the current operation mode (OPMOD field) from the CON
 * register.
 */
508 mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
513 err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
517 *mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);
/* Request operation mode @mode_req via CON.REQOP and, unless @nowait
 * or when entering Sleep Mode (the chip no longer answers SPI then),
 * poll CON.OPMOD until the mode change has taken effect.
 */
523 __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
524 const u8 mode_req, bool nowait)
529 con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
530 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
531 MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
535 if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
538 err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
539 FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
541 MCP251XFD_POLL_SLEEP_US,
542 MCP251XFD_POLL_TIMEOUT_US);
/* Timeout: report requested vs. actual mode. */
544 u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
546 netdev_err(priv->ndev,
547 "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
548 mcp251xfd_get_mode_str(mode_req), mode_req,
549 mcp251xfd_get_mode_str(mode), mode);
557 mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
560 return __mcp251xfd_chip_set_mode(priv, mode_req, false);
564 mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
567 return __mcp251xfd_chip_set_mode(priv, mode_req, true);
570 static inline bool mcp251xfd_osc_invalid(u32 reg)
572 return reg == 0x0 || reg == 0xffffffff;
/* Wake the controller's oscillator: write power-on defaults to the
 * OSC register (clearing "Oscillator Disable"), then poll until
 * "Oscillator Ready". Also detects a dead/absent chip via an
 * all-zero/all-one OSC read.
 */
575 static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
577 u32 osc, osc_reference, osc_mask;
580 /* Set Power On Defaults for "Clock Output Divisor" and remove
581 * "Oscillator Disable" bit.
583 osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
584 MCP251XFD_REG_OSC_CLKODIV_10);
585 osc_reference = MCP251XFD_REG_OSC_OSCRDY;
586 osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
590 * If the controller is in Sleep Mode the following write only
591 * removes the "Oscillator Disable" bit and powers it up. All
592 * other bits are unaffected.
594 err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
598 /* Wait for "Oscillator Ready" bit */
599 err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
600 (osc & osc_mask) == osc_reference,
601 MCP251XFD_OSC_STAB_SLEEP_US,
602 MCP251XFD_OSC_STAB_TIMEOUT_US);
603 if (mcp251xfd_osc_invalid(osc)) {
604 netdev_err(priv->ndev,
605 "Failed to detect %s (osc=0x%08x).\n",
606 mcp251xfd_get_model_str(priv), osc);
608 } else if (err == -ETIMEDOUT) {
609 netdev_err(priv->ndev,
610 "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
/* Issue a SPI soft reset. The chip must first be woken (clock on) and
 * put into Config Mode, otherwise the reset command is ignored.
 */
618 static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
620 const __be16 cmd = mcp251xfd_cmd_reset();
623 /* The Set Mode and SPI Reset command only seems to works if
624 * the controller is not in Sleep Mode.
626 err = mcp251xfd_chip_clock_enable(priv);
630 err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
634 /* spi_write_then_read() works with non DMA-safe buffers */
635 return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
/* Verify the soft reset took effect: the chip must be in Config Mode
 * and the OSC register must hold its documented reset defaults.
 */
638 static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
640 u32 osc, osc_reference;
644 err = mcp251xfd_chip_get_mode(priv, &mode);
648 if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
649 netdev_info(priv->ndev,
650 "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
651 mcp251xfd_get_mode_str(mode), mode);
655 osc_reference = MCP251XFD_REG_OSC_OSCRDY |
656 FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
657 MCP251XFD_REG_OSC_CLKODIV_10);
659 /* check reset defaults of OSC reg */
660 err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
664 if (osc != osc_reference) {
665 netdev_info(priv->ndev,
666 "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
/* Soft reset with retries: perform the reset and verify the result,
 * retrying up to MCP251XFD_SOFTRESET_RETRIES_MAX times on timeout.
 */
674 static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
678 for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
680 netdev_info(priv->ndev,
681 "Retrying to reset controller.\n");
683 err = mcp251xfd_chip_softreset_do(priv);
684 if (err == -ETIMEDOUT)
689 err = mcp251xfd_chip_softreset_check(priv);
690 if (err == -ETIMEDOUT)
/* Post-reset clock setup: enable Low Power Mode on oscillator
 * disable (MCP2518FD only) and enable the Time Base Counter used for
 * hardware RX/TEF timestamps.
 */
701 static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
706 /* Activate Low Power Mode on Oscillator Disable. This only
707 * works on the MCP2518FD. The MCP2517FD will go into normal
708 * Sleep Mode instead.
710 osc = MCP251XFD_REG_OSC_LPMEN |
711 FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
712 MCP251XFD_REG_OSC_CLKODIV_10);
713 err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
717 /* Set Time Base Counter Prescaler to 1.
719 * This means an overflow of the 32 bit Time Base Counter
720 * register at 40 MHz every 107 seconds.
722 return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
723 MCP251XFD_REG_TSCON_TBCEN);
/* Program CON, nominal bit timing (NBTCFG), and -- in FD mode --
 * data bit timing (DBTCFG) plus transmitter delay compensation (TDC)
 * from the bit timing parameters computed by the CAN core.
 */
726 static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
728 const struct can_bittiming *bt = &priv->can.bittiming;
729 const struct can_bittiming *dbt = &priv->can.data_bittiming;
734 /* CAN Control Register
736 * - no transmit bandwidth sharing
738 * - disable transmit queue
739 * - store in transmit FIFO event
740 * - transition to restricted operation mode on system error
741 * - ESI is transmitted recessive when ESI of message is high or
742 * CAN controller error passive
743 * - restricted retransmission attempts,
744 * use TQXCON_TXAT and FIFOCON_TXAT
745 * - wake-up filter bits T11FILTER
746 * - use CAN bus line filter for wakeup
747 * - protocol exception is treated as a form error
748 * - Do not compare data bytes
750 val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
751 MCP251XFD_REG_CON_MODE_CONFIG) |
752 MCP251XFD_REG_CON_STEF |
753 MCP251XFD_REG_CON_ESIGM |
754 MCP251XFD_REG_CON_RTXAT |
755 FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
756 MCP251XFD_REG_CON_WFT_T11FILTER) |
757 MCP251XFD_REG_CON_WAKFIL |
758 MCP251XFD_REG_CON_PXEDIS;
/* ISO CRC is the default; only disabled in non-ISO FD mode. */
760 if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
761 val |= MCP251XFD_REG_CON_ISOCRCEN;
763 err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
767 /* Nominal Bit Time */
768 val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
769 FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
770 bt->prop_seg + bt->phase_seg1 - 1) |
771 FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
772 bt->phase_seg2 - 1) |
773 FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);
775 err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
/* Data bit timing and TDC only apply in CAN-FD mode. */
779 if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
783 val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
784 FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
785 dbt->prop_seg + dbt->phase_seg1 - 1) |
786 FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
787 dbt->phase_seg2 - 1) |
788 FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);
790 err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
794 /* Transmitter Delay Compensation */
795 tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
797 val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
798 MCP251XFD_REG_TDC_TDCMOD_AUTO) |
799 FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
801 return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
/* Configure the IOCON register so PIN1 acts as RX interrupt while
 * staying an input to avoid a glitch on the rx-INT line (see comment
 * below).
 */
804 static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
813 * - PIN1: GPIO Input/RX Interrupt
815 * PIN1 must be Input, otherwise there is a glitch on the
816 * rx-INT line. It happens between setting the PIN as output
817 * (in the first byte of the SPI transfer) and configuring the
818 * PIN as interrupt (in the last byte of the SPI transfer).
820 val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
821 MCP251XFD_REG_IOCON_TRIS0;
822 return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
/* Revert both IO pins to plain GPIO inputs, disabling the dedicated
 * RX interrupt function.
 */
825 static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
836 val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
837 MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
838 return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
/* Program FIFOCON for one RX FIFO: size, timestamping, overflow and
 * not-empty interrupts, and a payload size matching the CAN mode
 * (64 bytes in FD/listen-only, 8 bytes otherwise).
 */
842 mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
843 const struct mcp251xfd_rx_ring *ring)
847 /* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
849 * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
850 * generate a RXOVIF, use this to properly detect RX MAB
853 fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
855 MCP251XFD_REG_FIFOCON_RXTSEN |
856 MCP251XFD_REG_FIFOCON_RXOVIE |
857 MCP251XFD_REG_FIFOCON_TFNRFNIE;
859 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
860 fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
861 MCP251XFD_REG_FIFOCON_PLSIZE_64);
863 fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
864 MCP251XFD_REG_FIFOCON_PLSIZE_8);
866 return regmap_write(priv->map_reg,
867 MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
/* Enable the accept-all filter for this ring and point it at the
 * ring's FIFO. FLTCON packs four filters per 32 bit register, hence
 * the "nr >> 2" register index and the per-filter mask.
 */
871 mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
872 const struct mcp251xfd_rx_ring *ring)
876 fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
877 MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
879 return regmap_update_bits(priv->map_reg,
880 MCP251XFD_REG_FLTCON(ring->nr >> 2),
881 MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
/* Program the TEF, the TX FIFO (size, payload, retransmission policy
 * per one-shot mode), and every RX FIFO with its filter.
 */
885 static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
887 const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
888 const struct mcp251xfd_rx_ring *rx_ring;
/* TEF: timestamping, overflow and not-empty interrupts enabled. */
893 val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
894 tx_ring->obj_num - 1) |
895 MCP251XFD_REG_TEFCON_TEFTSEN |
896 MCP251XFD_REG_TEFCON_TEFOVIE |
897 MCP251XFD_REG_TEFCON_TEFNEIE;
899 err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
/* TX FIFO: payload size follows the CAN mode, like the RX FIFOs. */
904 val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
905 tx_ring->obj_num - 1) |
906 MCP251XFD_REG_FIFOCON_TXEN |
907 MCP251XFD_REG_FIFOCON_TXATIE;
909 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
910 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
911 MCP251XFD_REG_FIFOCON_PLSIZE_64);
913 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
914 MCP251XFD_REG_FIFOCON_PLSIZE_8);
/* One-shot mode limits retransmission attempts to one. */
916 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
917 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
918 MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
920 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
921 MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
923 err = regmap_write(priv->map_reg,
924 MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
/* RX FIFOs and their filters. */
930 mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
931 err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
935 err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
/* Enable ECC (on chips with the ECC quirk) and zero-fill the whole
 * controller RAM so all ECC parity bits start out valid.
 */
943 static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
945 struct mcp251xfd_ecc *ecc = &priv->ecc;
952 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
953 val = MCP251XFD_REG_ECCCON_ECCEN;
955 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
956 MCP251XFD_REG_ECCCON_ECCEN, val);
960 ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
964 err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
/* Called after TEF handling succeeded; clears pending ECC error
 * state. NOTE(review): the body is not visible in this excerpt --
 * verify against the full file.
 */
971 static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
973 struct mcp251xfd_ecc *ecc = &priv->ecc;
/* Pick the runtime operation mode from the configured ctrlmode flags:
 * loopback > listen-only > FD (mixed) > classic CAN 2.0.
 */
978 static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
983 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
984 mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK;
985 else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
986 mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
987 else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
988 mode = MCP251XFD_REG_CON_MODE_MIXED;
990 mode = MCP251XFD_REG_CON_MODE_CAN2_0;
/* Switch the chip into the operation mode derived from ctrlmode,
 * optionally without waiting for the mode change to complete.
 */
996 __mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
1001 mode = mcp251xfd_get_normal_mode(priv);
1003 return __mcp251xfd_chip_set_mode(priv, mode, nowait);
1007 mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
1009 return __mcp251xfd_chip_set_normal_mode(priv, false);
1013 mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
1015 return __mcp251xfd_chip_set_normal_mode(priv, true);
/* Enable all interrupt sources used by the driver: SPI CRC errors,
 * ECC single/double errors, and the main INT register sources (bus
 * errors only with berr-reporting enabled).
 */
1018 static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
1023 val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
1024 err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
1028 val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
1029 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
1033 val = MCP251XFD_REG_INT_CERRIE |
1034 MCP251XFD_REG_INT_SERRIE |
1035 MCP251XFD_REG_INT_RXOVIE |
1036 MCP251XFD_REG_INT_TXATIE |
1037 MCP251XFD_REG_INT_SPICRCIE |
1038 MCP251XFD_REG_INT_ECCIE |
1039 MCP251XFD_REG_INT_TEFIE |
1040 MCP251XFD_REG_INT_MODIE |
1041 MCP251XFD_REG_INT_RXIE;
1043 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
1044 val |= MCP251XFD_REG_INT_IVMIE;
1046 return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
/* Mask all interrupt sources again: INT register, ECC interrupt
 * enables, and the CRC interrupt enables.
 */
1049 static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
1054 err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
1058 mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
1059 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
1064 return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
/* Stop the chip: record the new CAN state, mask interrupts, disable
 * the RX interrupt pin, and put the controller into Sleep Mode.
 */
1067 static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
1068 const enum can_state state)
1070 priv->can.state = state;
1072 mcp251xfd_chip_interrupts_disable(priv);
1073 mcp251xfd_chip_rx_int_disable(priv);
1074 return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
/* Full bring-up sequence: soft reset, clock/timestamp setup, bit
 * timing, RX interrupt pin, ECC + RAM init, ring layout, FIFO setup,
 * then switch to normal mode. On failure the chip state is dumped
 * and the chip stopped.
 */
1077 static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
1081 err = mcp251xfd_chip_softreset(priv)
1085 err = mcp251xfd_chip_clock_init(priv);
1089 err = mcp251xfd_set_bittiming(priv);
1093 err = mcp251xfd_chip_rx_int_enable(priv);
1097 err = mcp251xfd_chip_ecc_init(priv);
1101 mcp251xfd_ring_init(priv);
1103 err = mcp251xfd_chip_fifo_init(priv);
1107 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1109 err = mcp251xfd_chip_set_normal_mode(priv);
/* Error path: capture register state for debugging, then stop. */
1116 mcp251xfd_dump(priv);
1117 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
/* CAN framework .do_set_mode callback: on CAN_MODE_START restart the
 * chip, re-enable interrupts (stopping again on failure), and wake
 * the TX queue.
 */
1122 static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
1124 struct mcp251xfd_priv *priv = netdev_priv(ndev);
1128 case CAN_MODE_START:
1129 err = mcp251xfd_chip_start(priv);
1133 err = mcp251xfd_chip_interrupts_enable(priv);
1135 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1139 netif_wake_queue(ndev);
/* Read TX/RX error counters from the TREC register. The TEC field is
 * meaningless while in bus-off (TXBO set), handled in the branch
 * whose body is not visible in this excerpt.
 */
1149 static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
1150 struct can_berr_counter *bec)
1152 const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1156 err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1160 if (trec & MCP251XFD_REG_TREC_TXBO)
1163 bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
1164 bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
/* CAN framework .do_get_berr_counter callback. Avoids SPI access when
 * the interface is down or the chip is powered down in bus-off (the
 * saved counters are returned instead -- lines not fully visible).
 */
1169 static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
1170 struct can_berr_counter *bec)
1172 const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1174 /* Avoid waking up the controller if the interface is down */
1175 if (!(ndev->flags & IFF_UP))
1178 /* The controller is powered down during Bus Off, use saved
1181 if (priv->can.state == CAN_STATE_BUS_OFF) {
1186 return __mcp251xfd_get_berr_counter(ndev, bec);
/* Sanity check (compiled in only with CONFIG_CAN_MCP251XFD_SANITY):
 * compare the driver's TEF tail against the chip's.
 */
1189 static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
1191 u8 tef_tail_chip, tef_tail;
1194 if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1197 err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
1201 tef_tail = mcp251xfd_get_tef_tail(priv);
1202 if (tef_tail_chip != tef_tail) {
1203 netdev_err(priv->ndev,
1204 "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
1205 tef_tail_chip, tef_tail);
/* Sanity check (compiled in only with CONFIG_CAN_MCP251XFD_SANITY):
 * compare the driver's RX tail for @ring against the chip's.
 */
1213 mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
1214 const struct mcp251xfd_rx_ring *ring)
1216 u8 rx_tail_chip, rx_tail;
1219 if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1222 err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
1226 rx_tail = mcp251xfd_get_rx_tail(ring);
1227 if (rx_tail_chip != rx_tail) {
1228 netdev_err(priv->ndev,
1229 "RX tail of chip (%d) and ours (%d) inconsistent.\n",
1230 rx_tail_chip, rx_tail);
/* Recovery path when the sequence number read from the TEF does not
 * match the driver's tef_tail: log the TEF state (overflow is fatal,
 * otherwise informational) and signal the caller to retry later.
 */
1238 mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
1240 const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1244 err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
1248 if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
1249 netdev_err(priv->ndev,
1250 "Transmit Event FIFO buffer overflow.\n");
1254 netdev_info(priv->ndev,
1255 "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
1256 tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
1257 "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
1258 "not empty" : "empty",
1259 seq, priv->tef->tail, priv->tef->head, tx_ring->head);
1261 /* The Sequence Number in the TEF doesn't match our tef_tail. */
/* Handle one TEF object: verify its sequence number against our TEF
 * tail (recovering on mismatch), timestamp and release the matching
 * echo skb, and bump the TX packet counter.
 */
1266 mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
1267 const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
1268 unsigned int *frame_len_ptr)
1270 struct net_device_stats *stats = &priv->ndev->stats;
1271 struct sk_buff *skb;
1272 u32 seq, seq_masked, tef_tail_masked, tef_tail;
1274 seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
1277 /* Use the MCP2517FD mask on the MCP2518FD, too. We only
1278 * compare 7 bits, this should be enough to detect
1279 * net-yet-completed, i.e. old TEF objects.
1282 field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
1283 tef_tail_masked = priv->tef->tail &
1284 field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
1285 if (seq_masked != tef_tail_masked)
1286 return mcp251xfd_handle_tefif_recover(priv, seq);
1288 tef_tail = mcp251xfd_get_tef_tail(priv);
1289 skb = priv->can.echo_skb[tef_tail];
/* Echo skb back to the stack with the hardware timestamp. */
1291 mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
1293 can_rx_offload_get_echo_skb(&priv->offload,
1294 tef_tail, hw_tef_obj->ts,
1296 stats->tx_packets++;
/* Advance the driver's TEF head from the chip's TX tail. The modular
 * chip index is unwrapped into the driver's free-running counter:
 * the new head must be greater than the old one but may not pass the
 * TX head.
 */
1302 static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
1304 const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1305 unsigned int new_head;
1309 err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
1313 /* chip_tx_tail, is the next TX-Object send by the HW.
1314 * The new TEF head must be >= the old head, ...
1316 new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
1317 if (new_head <= priv->tef->head)
1318 new_head += tx_ring->obj_num;
1320 /* ... but it cannot exceed the TX head. */
1321 priv->tef->head = min(new_head, tx_ring->head);
1323 return mcp251xfd_check_tef_tail(priv);
1327 mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
1328 struct mcp251xfd_hw_tef_obj *hw_tef_obj,
1329 const u8 offset, const u8 len)
1331 const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1332 const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1334 if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
1335 (offset > tx_ring->obj_num ||
1336 len > tx_ring->obj_num ||
1337 offset + len > tx_ring->obj_num)) {
1338 netdev_err(priv->ndev,
1339 "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
1340 tx_ring->obj_num, offset, len);
1344 return regmap_bulk_read(priv->map_rx,
1345 mcp251xfd_get_tef_obj_addr(offset),
1347 sizeof(*hw_tef_obj) / val_bytes * len);
/* TEF interrupt handler: update the TEF head from the chip, read the
 * pending TEF objects (in up to two chunks when the ring wraps),
 * complete the corresponding echo skbs, increment the chip's TEF
 * tail with one chained SPI message, and wake the TX queue when
 * space became available.
 */
1350 static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
1352 struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
1353 unsigned int total_frame_len = 0;
1354 u8 tef_tail, len, l;
1357 err = mcp251xfd_tef_ring_update(priv);
1361 tef_tail = mcp251xfd_get_tef_tail(priv);
1362 len = mcp251xfd_get_tef_len(priv);
1363 l = mcp251xfd_get_tef_linear_len(priv);
1364 err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
/* Second read for the wrapped part of the ring, if any. */
1369 err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
1374 for (i = 0; i < len; i++) {
1375 unsigned int frame_len = 0;
1377 err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
1378 /* -EAGAIN means the Sequence Number in the TEF
1379 * doesn't match our tef_tail. This can happen if we
1380 * read the TEF objects too early. Leave loop let the
1381 * interrupt handler call us again.
1384 goto out_netif_wake_queue;
1388 total_frame_len += frame_len;
1391 out_netif_wake_queue:
1392 len = i; /* number of handled goods TEFs */
1394 struct mcp251xfd_tef_ring *ring = priv->tef;
1395 struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1398 /* Increment the TEF FIFO tail pointer 'len' times in
1399 * a single SPI message.
1402 * Calculate offset, so that the SPI transfer ends on
1403 * the last message of the uinc_xfer array, which has
1404 * "cs_change == 0", to properly deactivate the chip
1407 offset = ARRAY_SIZE(ring->uinc_xfer) - len;
1408 err = spi_sync_transfer(priv->spi,
1409 ring->uinc_xfer + offset, len);
/* Account the completed frames towards BQL. */
1413 tx_ring->tail += len;
1414 netdev_completed_queue(priv->ndev, len, total_frame_len);
1416 err = mcp251xfd_check_tef_tail(priv);
1421 mcp251xfd_ecc_tefif_successful(priv);
1423 if (mcp251xfd_get_tx_free(priv->tx)) {
1424 /* Make sure that anybody stopping the queue after
1425 * this sees the new tx_ring->tail.
1428 netif_wake_queue(priv->ndev);
/* Advance the driver's RX head for @ring from the chip's FIFO index,
 * unwrapping the modular index into the free-running head counter.
 */
1435 mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
1436 struct mcp251xfd_rx_ring *ring)
1442 err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
1446 /* chip_rx_head, is the next RX-Object filled by the HW.
1447 * The new RX head must be >= the old head.
1449 new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
1450 if (new_head <= ring->head)
1451 new_head += ring->obj_num;
1453 ring->head = new_head;
1455 return mcp251xfd_check_rx_tail(priv, ring);
/* Convert a hardware RX object into the CAN(-FD) frame inside @skb:
 * decode the (extended) ID, DLC, FD flags (ESI/BRS), RTR, copy the
 * payload, and attach the hardware timestamp.
 */
1459 mcp251xfd_hw_rx_obj_to_skb(struct mcp251xfd_priv *priv,
1460 const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1461 struct sk_buff *skb)
1463 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
1466 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
/* Extended frame: recombine SID + EID into a 29 bit identifier. */
1469 eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
1470 sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
1472 cfd->can_id = CAN_EFF_FLAG |
1473 FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
1474 FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
1476 cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
1480 dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);
/* FD frames carry ESI/BRS flags; classic frames may be RTR. */
1483 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
1485 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
1486 cfd->flags |= CANFD_ESI;
1488 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
1489 cfd->flags |= CANFD_BRS;
1491 cfd->len = can_fd_dlc2len(dlc);
1493 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
1494 cfd->can_id |= CAN_RTR_FLAG;
1496 can_frame_set_cc_len((struct can_frame *)cfd, dlc,
1497 priv->can.ctrlmode);
/* RTR frames carry no payload. */
1500 if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
1501 memcpy(cfd->data, hw_rx_obj->data, cfd->len);
1503 mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
/* Handle a single received RX object.
 *
 * Allocates a CAN-FD or classic CAN skb depending on the FDF flag,
 * fills it from the hardware object and queues it sorted by the
 * hardware timestamp. Accounts rx_dropped on allocation failure and
 * rx_fifo_errors if the offload queue rejects the skb.
 */
1507 mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
1508 struct mcp251xfd_rx_ring *ring,
1509 const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
1511 struct net_device_stats *stats = &priv->ndev->stats;
1512 struct sk_buff *skb;
1513 struct canfd_frame *cfd;
1516 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
1517 skb = alloc_canfd_skb(priv->ndev, &cfd);
1519 skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
1522 stats->rx_dropped++;
1526 mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
1527 err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
1529 stats->rx_fifo_errors++;
/* Bulk-read @len RX objects, starting at index @offset, from the
 * chip's RX FIFO into @hw_rx_obj via the rx regmap. The byte count is
 * converted into regmap value units (val_bytes).
 */
1535 mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
1536 const struct mcp251xfd_rx_ring *ring,
1537 struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1538 const u8 offset, const u8 len)
1540 const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1543 err = regmap_bulk_read(priv->map_rx,
1544 mcp251xfd_get_rx_obj_addr(ring, offset),
1546 len * ring->obj_size / val_bytes);
/* Process all pending RX objects of one RX ring.
 *
 * After syncing the ring head with the chip, repeatedly read the
 * largest linear (non-wrapping) chunk of RX objects, hand each one to
 * mcp251xfd_handle_rxif_one(), then increment the chip's RX FIFO tail
 * pointer 'len' times in a single SPI message built from uinc_xfer.
 */
1552 mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
1553 struct mcp251xfd_rx_ring *ring)
1555 struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
1559 err = mcp251xfd_rx_ring_update(priv, ring);
1563 while ((len = mcp251xfd_get_rx_linear_len(ring))) {
1566 rx_tail = mcp251xfd_get_rx_tail(ring);
1568 err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
1573 for (i = 0; i < len; i++) {
1574 err = mcp251xfd_handle_rxif_one(priv, ring,
1576 i * ring->obj_size);
1581 /* Increment the RX FIFO tail pointer 'len' times in a
1582 * single SPI message.
1585 * Calculate offset, so that the SPI transfer ends on
1586 * the last message of the uinc_xfer array, which has
1587 * "cs_change == 0", to properly deactivate the chip
1590 offset = ARRAY_SIZE(ring->uinc_xfer) - len;
1591 err = spi_sync_transfer(priv->spi,
1592 ring->uinc_xfer + offset, len);
/* Handle the RX interrupt: process every RX ring in turn. */
1602 static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
1604 struct mcp251xfd_rx_ring *ring;
1607 mcp251xfd_for_each_rx_ring(priv, ring, n) {
1608 err = mcp251xfd_handle_rxif_ring(priv, ring);
/* Allocate a CAN error skb stamped with the current chip timestamp.
 *
 * The timestamp is read from the chip first and returned via
 * *timestamp; the allocated skb (if any) carries the same timestamp.
 */
1616 static struct sk_buff *
1617 mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
1618 struct can_frame **cf, u32 *timestamp)
1620 struct sk_buff *skb;
1623 err = mcp251xfd_get_timestamp(priv, timestamp);
1627 skb = alloc_can_err_skb(priv->ndev, cf);
1629 mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
/* Handle the RX FIFO overflow interrupt.
 *
 * For each RX ring whose RXOVIF bit is set: log whether this was a RX
 * MAB overflow (SERRIF pending, see MCP2517FD erratum DS80000792B 1.)
 * or a plain FIFO overflow, clear the per-FIFO overflow flag, then
 * queue a single CAN_ERR_CRTL_RX_OVERFLOW error frame.
 */
1634 static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
1636 struct net_device_stats *stats = &priv->ndev->stats;
1637 struct mcp251xfd_rx_ring *ring;
1638 struct sk_buff *skb;
1639 struct can_frame *cf;
1640 u32 timestamp, rxovif;
1643 stats->rx_over_errors++;
1646 err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
1650 mcp251xfd_for_each_rx_ring(priv, ring, i) {
1651 if (!(rxovif & BIT(ring->fifo_nr)))
1654 /* If SERRIF is active, there was a RX MAB overflow. */
1655 if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
1656 netdev_info(priv->ndev,
1657 "RX-%d: MAB overflow detected.\n",
1660 netdev_info(priv->ndev,
1661 "RX-%d: FIFO overflow.\n", ring->nr);
1664 err = regmap_update_bits(priv->map_reg,
1665 MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
1666 MCP251XFD_REG_FIFOSTA_RXOVIF,
/* NOTE(review): "×tamp" below is mojibake for "&timestamp" - fix the
 * file's encoding; the intent is to pass the address of 'timestamp'.
 */
1672 skb = mcp251xfd_alloc_can_err_skb(priv, &cf, ×tamp);
1676 cf->can_id |= CAN_ERR_CRTL;
1677 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
1679 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
1681 stats->rx_fifo_errors++;
/* TX attempt interrupt: not handled yet, only logged. */
1686 static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
1688 netdev_info(priv->ndev, "%s\n", __func__);
/* Handle the Invalid Message interrupt (bus errors).
 *
 * Reads BDIAG1 and clears it by writing 0 (writing 1s would set
 * bits), then translates the individual CRC, stuff, form, NACK, bit1
 * and bit0 error flags into a CAN error frame, which is queued sorted
 * by the chip timestamp taken at entry.
 */
1693 static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
1695 struct net_device_stats *stats = &priv->ndev->stats;
1696 u32 bdiag1, timestamp;
1697 struct sk_buff *skb;
1698 struct can_frame *cf = NULL;
/* NOTE(review): "×tamp" below is mojibake for "&timestamp" - fix the
 * file's encoding.
 */
1701 err = mcp251xfd_get_timestamp(priv, ×tamp);
1705 err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
1709 /* Write 0s to clear error bits, don't write 1s to non active
1710 * bits, as they will be set.
1712 err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
1716 priv->can.can_stats.bus_error++;
1718 skb = alloc_can_err_skb(priv->ndev, &cf);
1720 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1722 /* Controller misconfiguration */
1723 if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
1724 netdev_err(priv->ndev,
1725 "recv'd DLC is larger than PLSIZE of FIFO element.");
1728 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
1729 MCP251XFD_REG_BDIAG1_NCRCERR)) {
1730 netdev_dbg(priv->ndev, "CRC error\n");
1734 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
1736 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
1737 MCP251XFD_REG_BDIAG1_NSTUFERR)) {
1738 netdev_dbg(priv->ndev, "Stuff error\n");
1742 cf->data[2] |= CAN_ERR_PROT_STUFF;
1744 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
1745 MCP251XFD_REG_BDIAG1_NFORMERR)) {
1746 netdev_dbg(priv->ndev, "Format error\n");
1750 cf->data[2] |= CAN_ERR_PROT_FORM;
1754 if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
1755 netdev_dbg(priv->ndev, "NACK error\n");
1759 cf->can_id |= CAN_ERR_ACK;
1760 cf->data[2] |= CAN_ERR_PROT_TX;
1763 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
1764 MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
1765 netdev_dbg(priv->ndev, "Bit1 error\n");
1769 cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
1771 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
1772 MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
1773 netdev_dbg(priv->ndev, "Bit0 error\n");
1777 cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
1783 mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
1784 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
1786 stats->rx_fifo_errors++;
/* Handle the CAN error interrupt (error state changes).
 *
 * Derives the new TX and RX error states from the TREC register,
 * calls can_change_state() if the combined state changed. On Bus Off
 * the error counters are saved (for do_get_berr_counter() while the
 * chip is off) and the chip is stopped; otherwise the current error
 * counters are added to the error frame before it is queued.
 */
1791 static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
1793 struct net_device_stats *stats = &priv->ndev->stats;
1794 struct sk_buff *skb;
1795 struct can_frame *cf = NULL;
1796 enum can_state new_state, rx_state, tx_state;
1797 u32 trec, timestamp;
1800 err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1804 if (trec & MCP251XFD_REG_TREC_TXBO)
1805 tx_state = CAN_STATE_BUS_OFF;
1806 else if (trec & MCP251XFD_REG_TREC_TXBP)
1807 tx_state = CAN_STATE_ERROR_PASSIVE;
1808 else if (trec & MCP251XFD_REG_TREC_TXWARN)
1809 tx_state = CAN_STATE_ERROR_WARNING;
1811 tx_state = CAN_STATE_ERROR_ACTIVE;
1813 if (trec & MCP251XFD_REG_TREC_RXBP)
1814 rx_state = CAN_STATE_ERROR_PASSIVE;
1815 else if (trec & MCP251XFD_REG_TREC_RXWARN)
1816 rx_state = CAN_STATE_ERROR_WARNING;
1818 rx_state = CAN_STATE_ERROR_ACTIVE;
1820 new_state = max(tx_state, rx_state);
1821 if (new_state == priv->can.state)
1824 /* The skb allocation might fail, but can_change_state()
1825 * handles cf == NULL.
/* NOTE(review): "×tamp" below is mojibake for "&timestamp" - fix the
 * file's encoding.
 */
1827 skb = mcp251xfd_alloc_can_err_skb(priv, &cf, ×tamp);
1828 can_change_state(priv->ndev, cf, tx_state, rx_state);
1830 if (new_state == CAN_STATE_BUS_OFF) {
1831 /* As we're going to switch off the chip now, let's
1832 * save the error counters and return them to
1833 * userspace, if do_get_berr_counter() is called while
1834 * the chip is in Bus Off.
1836 err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
1840 mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
1841 can_bus_off(priv->ndev);
1847 if (new_state != CAN_STATE_BUS_OFF) {
1848 struct can_berr_counter bec;
1850 err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
1853 cf->data[6] = bec.txerr;
1854 cf->data[7] = bec.rxerr;
1857 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
1859 stats->rx_fifo_errors++;
/* Handle the Mode Change interrupt.
 *
 * A change into the requested Normal mode is only logged. Mode
 * changes caused by a TX MAB underflow (MCP2517FD erratum
 * DS80000792B 1.) are logged and Normal mode is requested again -
 * unless an ECC error is also pending, in which case the TX object
 * must be reloaded first; *set_normal_mode then tells the caller to
 * do the mode switch after mcp251xfd_handle_eccif().
 */
1865 mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
1867 const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
1871 err = mcp251xfd_chip_get_mode(priv, &mode);
1875 if (mode == mode_reference) {
1876 netdev_dbg(priv->ndev,
1877 "Controller changed into %s Mode (%u).\n",
1878 mcp251xfd_get_mode_str(mode), mode);
1882 /* According to MCP2517FD errata DS80000792B 1., during a TX
1883 * MAB underflow, the controller will transition to Restricted
1884 * Operation Mode or Listen Only Mode (depending on SERR2LOM).
1886 * However this is not always the case. If SERR2LOM is
1887 * configured for Restricted Operation Mode (SERR2LOM not set)
1888 * the MCP2517FD will sometimes transition to Listen Only Mode
1889 * first. When polling this bit we see that it will transition
1890 * to Restricted Operation Mode shortly after.
1892 if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
1893 (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
1894 mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
1895 netdev_dbg(priv->ndev,
1896 "Controller changed into %s Mode (%u).\n",
1897 mcp251xfd_get_mode_str(mode), mode);
1899 netdev_err(priv->ndev,
1900 "Controller changed into %s Mode (%u).\n",
1901 mcp251xfd_get_mode_str(mode), mode);
1903 /* After the application requests Normal mode, the controller
1904 * will automatically attempt to retransmit the message that
1905 * caused the TX MAB underflow.
1907 * However, if there is an ECC error in the TX-RAM, we first
1908 * have to reload the tx-object before requesting Normal
1909 * mode. This is done later in mcp251xfd_handle_eccif().
1911 if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
1912 *set_normal_mode = true;
1916 return mcp251xfd_chip_set_normal_mode_nowait(priv);
/* Handle the System Error interrupt.
 *
 * Distinguishes the known patterns of MCP2517FD erratum DS80000792B
 * 1.: a TX MAB underflow (SERRIF together with MODIF+IVMIF, or with a
 * pending/previously counted ECC error) and a RX MAB overflow (RXOVIF
 * or RXIF pending). Anything else is reported as an unhandled system
 * error with the raw interrupt flags.
 */
1919 static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
1921 struct mcp251xfd_ecc *ecc = &priv->ecc;
1922 struct net_device_stats *stats = &priv->ndev->stats;
1923 bool handled = false;
1927 * According to MCP2517FD Errata DS80000792B 1. a TX MAB
1928 * underflow is indicated by SERRIF and MODIF.
1930 * In addition to the effects mentioned in the Errata, there
1931 * are Bus Errors due to the aborted CAN frame, so a IVMIF
1932 * will be seen as well.
1934 * Sometimes there is an ECC error in the TX-RAM, which leads
1935 * to a TX MAB underflow.
1937 * However, probably due to a race condition, there is no
1938 * associated MODIF pending.
1940 * Further, there are situations, where the SERRIF is caused
1941 * by an ECC error in the TX-RAM, but not even the ECCIF is
1942 * set. This only seems to happen _after_ the first occurrence
1943 * of a ECCIF (which is tracked in ecc->cnt).
1945 * Treat all as a known system errors..
1947 if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
1948 priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
1949 priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
1953 if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
1955 msg = "TX MAB underflow due to ECC error detected.";
1957 msg = "TX MAB underflow detected.";
1959 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
1960 netdev_dbg(priv->ndev, "%s\n", msg);
1962 netdev_info(priv->ndev, "%s\n", msg);
1964 stats->tx_aborted_errors++;
1971 * According to MCP2517FD Errata DS80000792B 1. a RX MAB
1972 * overflow is indicated by SERRIF.
1974 * In addition to the effects mentioned in the Errata, (most
1975 * of the times) a RXOVIF is raised, if the FIFO that is being
1976 * received into has the RXOVIE activated (and we have enabled
1977 * RXOVIE on all FIFOs).
1979 * Sometimes there is no RXOVIF just a RXIF is pending.
1981 * Treat all as a known system errors..
1983 if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
1984 priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
1985 stats->rx_dropped++;
1990 netdev_err(priv->ndev,
1991 "Unhandled System Error Interrupt (intf=0x%08x)!\n",
1992 priv->regs_status.intf);
1998 mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
2000 struct mcp251xfd_tx_ring *tx_ring = priv->tx;
2001 struct mcp251xfd_ecc *ecc = &priv->ecc;
2002 struct mcp251xfd_tx_obj *tx_obj;
2003 u8 chip_tx_tail, tx_tail, offset;
2007 addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);
2009 err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
2013 tx_tail = mcp251xfd_get_tx_tail(tx_ring);
2014 offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);
2016 /* Bail out if one of the following is met:
2017 * - tx_tail information is inconsistent
2018 * - for mcp2517fd: offset not 0
2019 * - for mcp2518fd: offset not 0 or 1
2021 if (chip_tx_tail != tx_tail ||
2022 !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
2023 netdev_err(priv->ndev,
2024 "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
2025 addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
2030 netdev_info(priv->ndev,
2031 "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
2032 ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
2033 "Single" : "Double",
2034 addr, nr, tx_ring->tail, tx_tail, offset);
2036 /* reload tx_obj into controller RAM ... */
2037 tx_obj = &tx_ring->obj[nr];
2038 err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
2042 /* ... and trigger retransmit */
2043 return mcp251xfd_chip_set_normal_mode(priv);
/* Handle the ECC interrupt.
 *
 * Reads ECCSTAT and clears the interrupt flags. Per errata
 * (mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.) single error
 * correction is not relied upon: both SECIF and DEDIF are treated as
 * "the RAM word at ERRADDR is corrupted". Errors outside the TX-RAM
 * are only logged; re-occurring TX-RAM errors are counted and
 * recovered once MCP251XFD_ECC_CNT_MAX is reached. If the modif
 * handler deferred the mode switch, Normal mode is requested here.
 */
2047 mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
2049 struct mcp251xfd_ecc *ecc = &priv->ecc;
2057 err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
2061 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
2062 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
2066 /* Check if ECC error occurred in TX-RAM */
2067 addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
2068 err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
2071 else if (err == -ENOENT)
2076 /* Errata Reference:
2077 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
2079 * ECC single error correction does not work in all cases:
2082 * Enable single error correction and double error detection
2083 * interrupts by setting SECIE and DEDIE. Handle SECIF as a
2084 * detection interrupt and do not rely on the error
2085 * correction. Instead, handle both interrupts as a
2086 * notification that the RAM word at ERRADDR was corrupted.
2088 if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
2089 msg = "Single ECC Error detected at address";
2090 else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
2091 msg = "Double ECC Error detected at address";
2098 netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
2100 /* Re-occurring error? */
2101 if (ecc->ecc_stat == ecc_stat) {
2104 ecc->ecc_stat = ecc_stat;
2108 netdev_info(priv->ndev,
2109 "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
2110 msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");
2112 if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
2113 return mcp251xfd_handle_eccif_recover(priv, nr);
2116 if (set_normal_mode)
2117 return mcp251xfd_chip_set_normal_mode_nowait(priv);
/* Handle the SPI CRC interrupt: read and clear the CRC status flags,
 * then log whether a write command format error or a CRC mismatch
 * (with the received CRC value) was detected.
 */
2122 static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
2127 err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
2131 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
2132 MCP251XFD_REG_CRC_IF_MASK,
2137 if (crc & MCP251XFD_REG_CRC_FERRIF)
2138 netdev_notice(priv->ndev, "CRC write command format error.\n");
2139 else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
2140 netdev_notice(priv->ndev,
2141 "CRC write error detected. CRC=0x%04lx.\n",
2142 FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));
/* Invoke mcp251xfd_handle_<irq>() and, on failure, log an error that
 * names the failing handler via __stringify(). Used by
 * mcp251xfd_irq() to dispatch the individual interrupt sources.
 */
2147 #define mcp251xfd_handle(priv, irq, ...) \
2149 struct mcp251xfd_priv *_priv = (priv); \
2152 err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
2154 netdev_err(_priv->ndev, \
2155 "IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
2156 __stringify(irq), err); \
/* Threaded IRQ handler.
 *
 * First drains the RX FIFOs for as long as the optional RX_INT GPIO
 * stays active. Then loops: bulk-read the interrupt status registers
 * (regs_status) in one transfer, mask pending against enabled
 * interrupts, ACK the clearable ones before handling them (writing 0
 * to active bits only, avoiding both a lost-IRQ and a r/m/w race),
 * and dispatch to the individual handlers. The handler is left early
 * on Bus Off, since the chip is shut down and further register reads
 * would return bogus data (and fail the CRC check with
 * MCP251XFD_QUIRK_CRC_REG). On any error the chip's interrupts are
 * disabled and a register dump is produced.
 */
2160 static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
2162 struct mcp251xfd_priv *priv = dev_id;
2163 const int val_bytes = regmap_get_val_bytes(priv->map_reg);
2164 irqreturn_t handled = IRQ_NONE;
2171 rx_pending = gpiod_get_value_cansleep(priv->rx_int);
2175 err = mcp251xfd_handle(priv, rxif);
2179 handled = IRQ_HANDLED;
2183 u32 intf_pending, intf_pending_clearable;
2184 bool set_normal_mode = false;
2186 err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
2188 sizeof(priv->regs_status) /
2193 intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
2194 priv->regs_status.intf) &
2195 FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
2196 priv->regs_status.intf);
2198 if (!(intf_pending))
2201 /* Some interrupts must be ACKed in the
2202 * MCP251XFD_REG_INT register.
2203 * - First ACK then handle, to avoid lost-IRQ race
2204 * condition on fast re-occurring interrupts.
2205 * - Write "0" to clear active IRQs, "1" to all other,
2206 * to avoid r/m/w race condition on the
2207 * MCP251XFD_REG_INT register.
2209 intf_pending_clearable = intf_pending &
2210 MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
2211 if (intf_pending_clearable) {
2212 err = regmap_update_bits(priv->map_reg,
2214 MCP251XFD_REG_INT_IF_MASK,
2215 ~intf_pending_clearable);
2220 if (intf_pending & MCP251XFD_REG_INT_MODIF) {
2221 err = mcp251xfd_handle(priv, modif, &set_normal_mode);
2226 if (intf_pending & MCP251XFD_REG_INT_RXIF) {
2227 err = mcp251xfd_handle(priv, rxif);
2232 if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
2233 err = mcp251xfd_handle(priv, tefif);
2238 if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
2239 err = mcp251xfd_handle(priv, rxovif);
2244 if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
2245 err = mcp251xfd_handle(priv, txatif);
2250 if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
2251 err = mcp251xfd_handle(priv, ivmif);
2256 if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
2257 err = mcp251xfd_handle(priv, serrif);
2262 if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
2263 err = mcp251xfd_handle(priv, eccif, set_normal_mode);
2268 if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
2269 err = mcp251xfd_handle(priv, spicrcif);
2274 /* On the MCP2527FD and MCP2518FD, we don't get a
2275 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
2278 if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
2279 priv->can.state > CAN_STATE_ERROR_ACTIVE) {
2280 err = mcp251xfd_handle(priv, cerrif);
2284 /* In Bus Off we completely shut down the
2285 * controller. Every subsequent register read
2286 * will read bogus data, and if
2287 * MCP251XFD_QUIRK_CRC_REG is enabled the CRC
2288 * check will fail, too. So leave IRQ handler
2291 if (priv->can.state == CAN_STATE_BUS_OFF)
2295 handled = IRQ_HANDLED;
2299 netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
2300 err, priv->regs_status.intf);
2301 mcp251xfd_dump(priv);
2302 mcp251xfd_chip_interrupts_disable(priv);
/* Return a pointer to the TX object at the current TX ring head. */
2307 static inline struct
2308 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
2312 tx_head = mcp251xfd_get_tx_head(tx_ring);
2314 return &tx_ring->obj[tx_head];
/* Convert @skb into the hardware TX object @tx_obj.
 *
 * Builds the ID word (SID/EID for extended frames), the flags word
 * (sequence number, RTR, and either FD/ESI/BRS or the classic-CAN
 * DLC), copies the payload - zero-padding up to the sanitized CAN-FD
 * length when MCP251XFD_SANITIZE_CAN is set - and finishes the
 * preassembled SPI transfer length. With MCP251XFD_QUIRK_CRC_TX a
 * CRC over the SPI command and payload is appended to the buffer.
 */
2318 mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
2319 struct mcp251xfd_tx_obj *tx_obj,
2320 const struct sk_buff *skb,
2323 const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
2324 struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
2325 union mcp251xfd_tx_obj_load_buf *load_buf;
2328 int len_sanitized = 0, len;
2330 if (cfd->can_id & CAN_EFF_FLAG) {
2333 sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
2334 eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);
2336 id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
2337 FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);
2339 flags = MCP251XFD_OBJ_FLAGS_IDE;
2341 id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
2345 /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
2346 * harm, only the lower 7 bits will be transferred into the
2349 flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);
2351 if (cfd->can_id & CAN_RTR_FLAG)
2352 flags |= MCP251XFD_OBJ_FLAGS_RTR;
2354 len_sanitized = canfd_sanitize_len(cfd->len);
2357 if (can_is_canfd_skb(skb)) {
2358 if (cfd->flags & CANFD_ESI)
2359 flags |= MCP251XFD_OBJ_FLAGS_ESI;
2361 flags |= MCP251XFD_OBJ_FLAGS_FDF;
2363 if (cfd->flags & CANFD_BRS)
2364 flags |= MCP251XFD_OBJ_FLAGS_BRS;
2366 dlc = can_fd_len2dlc(cfd->len);
2368 dlc = can_get_cc_dlc((struct can_frame *)cfd,
2369 priv->can.ctrlmode);
2372 flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);
2374 load_buf = &tx_obj->buf;
2375 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
2376 hw_tx_obj = &load_buf->crc.hw_tx_obj;
2378 hw_tx_obj = &load_buf->nocrc.hw_tx_obj;
2380 put_unaligned_le32(id, &hw_tx_obj->id);
2381 put_unaligned_le32(flags, &hw_tx_obj->flags);
2384 memcpy(hw_tx_obj->data, cfd->data, cfd->len);
2386 /* Clear unused data at end of CAN frame */
2387 if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
2390 pad_len = len_sanitized - cfd->len;
2392 memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
2395 /* Number of bytes to be written into the RAM of the controller */
2396 len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
2397 if (MCP251XFD_SANITIZE_CAN)
2398 len += round_up(len_sanitized, sizeof(u32));
2400 len += round_up(cfd->len, sizeof(u32));
2402 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
2405 mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
2408 len += sizeof(load_buf->crc.cmd);
2409 crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
2410 put_unaligned_be16(crc, (void *)load_buf + len);
2413 len += sizeof(load_buf->crc.crc);
2415 len += sizeof(load_buf->nocrc.cmd);
2418 tx_obj->xfer[0].len = len;
/* Queue the preassembled SPI message that uploads and triggers the
 * TX object; called from the xmit path, hence asynchronous.
 */
2421 static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
2422 struct mcp251xfd_tx_obj *tx_obj)
2424 return spi_async(priv->spi, &tx_obj->msg);
/* Return true (and stop the netif queue) if the TX ring is full.
 *
 * After stopping the queue the free space is re-checked behind a
 * memory barrier: if space became available concurrently, the queue
 * is restarted and false is returned.
 */
2427 static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
2428 struct mcp251xfd_tx_ring *tx_ring)
2430 if (mcp251xfd_get_tx_free(tx_ring) > 0)
2433 netif_stop_queue(priv->ndev);
2435 /* Memory barrier before checking tx_free (head and tail) */
2438 if (mcp251xfd_get_tx_free(tx_ring) == 0) {
2439 netdev_dbg(priv->ndev,
2440 "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
2441 tx_ring->head, tx_ring->tail,
2442 tx_ring->head - tx_ring->tail);
2447 netif_start_queue(priv->ndev);
/* ndo_start_xmit: place the frame into the next free TX object.
 *
 * Returns NETDEV_TX_BUSY while the TX ring is full. Otherwise the
 * skb is converted into the TX object at the ring head, the queue is
 * stopped if this fills the TX FIFO, the skb is registered for echo
 * and byte-queue-limits accounting, and the TX object is written to
 * the chip asynchronously. A write error is logged, but the skb is
 * still consumed (NETDEV_TX_OK).
 */
2452 static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
2453 struct net_device *ndev)
2455 struct mcp251xfd_priv *priv = netdev_priv(ndev);
2456 struct mcp251xfd_tx_ring *tx_ring = priv->tx;
2457 struct mcp251xfd_tx_obj *tx_obj;
2458 unsigned int frame_len;
2462 if (can_dropped_invalid_skb(ndev, skb))
2463 return NETDEV_TX_OK;
2465 if (mcp251xfd_tx_busy(priv, tx_ring))
2466 return NETDEV_TX_BUSY;
2468 tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
2469 mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
2471 /* Stop queue if we occupy the complete TX FIFO */
2472 tx_head = mcp251xfd_get_tx_head(tx_ring);
2474 if (mcp251xfd_get_tx_free(tx_ring) == 0)
2475 netif_stop_queue(ndev);
2477 frame_len = can_skb_get_frame_len(skb);
2478 err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
2480 netdev_sent_queue(priv->ndev, frame_len);
2482 err = mcp251xfd_tx_obj_write(priv, tx_obj);
2486 return NETDEV_TX_OK;
2489 netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
2491 return NETDEV_TX_OK;
/* ndo_open: bring the interface up.
 *
 * Powers up via runtime PM, opens the candev, allocates the rings,
 * enables the transceiver, starts the chip, initializes the
 * timestamp worker, enables RX offload, requests the threaded IRQ
 * and finally enables the chip's interrupts. Unwinds in reverse
 * order on failure.
 */
2494 static int mcp251xfd_open(struct net_device *ndev)
2496 struct mcp251xfd_priv *priv = netdev_priv(ndev);
2497 const struct spi_device *spi = priv->spi;
2500 err = pm_runtime_get_sync(ndev->dev.parent);
2502 pm_runtime_put_noidle(ndev->dev.parent);
2506 err = open_candev(ndev);
2508 goto out_pm_runtime_put;
2510 err = mcp251xfd_ring_alloc(priv);
2512 goto out_close_candev;
2514 err = mcp251xfd_transceiver_enable(priv);
2516 goto out_mcp251xfd_ring_free;
2518 err = mcp251xfd_chip_start(priv);
2520 goto out_transceiver_disable;
2522 mcp251xfd_timestamp_init(priv);
2523 can_rx_offload_enable(&priv->offload);
2525 err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
2526 IRQF_ONESHOT, dev_name(&spi->dev),
2529 goto out_can_rx_offload_disable;
2531 err = mcp251xfd_chip_interrupts_enable(priv);
2535 netif_start_queue(ndev);
2540 free_irq(spi->irq, priv);
2541 out_can_rx_offload_disable:
2542 can_rx_offload_disable(&priv->offload);
2543 mcp251xfd_timestamp_stop(priv);
2544 out_transceiver_disable:
2545 mcp251xfd_transceiver_disable(priv);
2546 out_mcp251xfd_ring_free:
2547 mcp251xfd_ring_free(priv);
2551 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
2552 pm_runtime_put(ndev->dev.parent);
/* ndo_stop: tear down in reverse order of mcp251xfd_open(). */
2557 static int mcp251xfd_stop(struct net_device *ndev)
2559 struct mcp251xfd_priv *priv = netdev_priv(ndev);
2561 netif_stop_queue(ndev);
2562 mcp251xfd_chip_interrupts_disable(priv);
2563 free_irq(ndev->irq, priv);
2564 can_rx_offload_disable(&priv->offload);
2565 mcp251xfd_timestamp_stop(priv);
2566 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
2567 mcp251xfd_transceiver_disable(priv);
2568 mcp251xfd_ring_free(priv);
2571 pm_runtime_put(ndev->dev.parent);
/* net_device callbacks for the CAN interface. */
2576 static const struct net_device_ops mcp251xfd_netdev_ops = {
2577 .ndo_open = mcp251xfd_open,
2578 .ndo_stop = mcp251xfd_stop,
2579 .ndo_start_xmit = mcp251xfd_start_xmit,
2580 .ndo_change_mtu = can_change_mtu,
/* Derive quirks from the SPI controller: half-duplex controllers get
 * MCP251XFD_QUIRK_HALF_DUPLEX.
 */
2584 mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
2586 const struct spi_device *spi = priv->spi;
2587 const struct spi_controller *ctlr = spi->controller;
2589 if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
2590 priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
/* Autodetect the chip model.
 *
 * The OSC_LPMEN bit exists only on the MCP2518FD: set it and read it
 * back - if it sticks, the chip is a mcp2518fd, otherwise a
 * mcp2517fd. If the firmware specified a different (non-generic)
 * model, log it, fix up devtype_data, re-apply the controller quirks
 * and re-initialize the regmap for the detected model.
 */
2593 static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
2595 const struct net_device *ndev = priv->ndev;
2596 const struct mcp251xfd_devtype_data *devtype_data;
2600 /* The OSC_LPMEN is only supported on MCP2518FD, so use it to
2601 * autodetect the model.
2603 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
2604 MCP251XFD_REG_OSC_LPMEN,
2605 MCP251XFD_REG_OSC_LPMEN);
2609 err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
2613 if (osc & MCP251XFD_REG_OSC_LPMEN)
2614 devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
2616 devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
2618 if (!mcp251xfd_is_251X(priv) &&
2619 priv->devtype_data.model != devtype_data->model) {
2621 "Detected %s, but firmware specifies a %s. Fixing up.",
2622 __mcp251xfd_get_model_str(devtype_data->model),
2623 mcp251xfd_get_model_str(priv));
2625 priv->devtype_data = *devtype_data;
2627 /* We need to preserve the Half Duplex Quirk. */
2628 mcp251xfd_register_quirks(priv);
2630 /* Re-init regmap with quirks of detected model. */
2631 return mcp251xfd_regmap_init(priv);
/* Verify that the optional RX_INT GPIO works.
 *
 * After a softreset RX_INT must be inactive. If it is still active,
 * RX_INT support is disabled (the GPIO is released and priv->rx_int
 * set to NULL) and the driver falls back to interrupt-driven RX.
 */
2634 static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
2636 int err, rx_pending;
2641 err = mcp251xfd_chip_rx_int_enable(priv);
2645 /* Check if RX_INT is properly working. The RX_INT should not
2646 * be active after a softreset.
2648 rx_pending = gpiod_get_value_cansleep(priv->rx_int);
2650 err = mcp251xfd_chip_rx_int_disable(priv);
2657 netdev_info(priv->ndev,
2658 "RX_INT active after softreset, disabling RX_INT support.");
2659 devm_gpiod_put(&priv->spi->dev, priv->rx_int);
2660 priv->rx_int = NULL;
2666 mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
2667 u32 *dev_id, u32 *effective_speed_hz)
2669 struct mcp251xfd_map_buf_nocrc *buf_rx;
2670 struct mcp251xfd_map_buf_nocrc *buf_tx;
2671 struct spi_transfer xfer[2] = { };
2674 buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
2678 buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
2681 goto out_kfree_buf_rx;
2684 xfer[0].tx_buf = buf_tx;
2685 xfer[0].len = sizeof(buf_tx->cmd);
2686 xfer[1].rx_buf = buf_rx->data;
2687 xfer[1].len = sizeof(dev_id);
2689 mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
2690 err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
2692 goto out_kfree_buf_tx;
2694 *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
2695 *effective_speed_hz = xfer->effective_speed_hz;
/* '+' if the given quirk is active, '-' otherwise; used for the
 * banner printed by mcp251xfd_register_done().
 */
2705 #define MCP251XFD_QUIRK_ACTIVE(quirk) \
2706 (priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')
/* Print the "successfully initialized" banner: model, silicon
 * revision, active quirks and the CAN clock, original/current SPI
 * maximum and effective SPI frequencies.
 */
2709 mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
2711 u32 dev_id, effective_speed_hz;
2714 err = mcp251xfd_register_get_dev_id(priv, &dev_id,
2715 &effective_speed_hz);
2719 netdev_info(priv->ndev,
2720 "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
2721 mcp251xfd_get_model_str(priv),
2722 FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
2723 FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
2724 priv->rx_int ? '+' : '-',
2725 MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
2726 MCP251XFD_QUIRK_ACTIVE(CRC_REG),
2727 MCP251XFD_QUIRK_ACTIVE(CRC_RX),
2728 MCP251XFD_QUIRK_ACTIVE(CRC_TX),
2729 MCP251XFD_QUIRK_ACTIVE(ECC),
2730 MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
2731 priv->can.clock.freq / 1000000,
2732 priv->can.clock.freq % 1000000 / 1000 / 10,
2733 priv->spi_max_speed_hz_orig / 1000000,
2734 priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
2735 priv->spi->max_speed_hz / 1000000,
2736 priv->spi->max_speed_hz % 1000000 / 1000 / 10,
2737 effective_speed_hz / 1000000,
2738 effective_speed_hz % 1000000 / 1000 / 10);
/* Final registration step of probe.
 *
 * Enables clocks/vdd and runtime PM, softresets and autodetects the
 * chip, checks the RX_INT GPIO, registers the candev and prints the
 * banner. On success the chip is put back into Sleep mode so that
 * pm_runtime_put() may disable clocks and vdd (they stay powered
 * without CONFIG_PM). Unwinds in reverse order on failure.
 */
2743 static int mcp251xfd_register(struct mcp251xfd_priv *priv)
2745 struct net_device *ndev = priv->ndev;
2748 err = mcp251xfd_clks_and_vdd_enable(priv);
2752 pm_runtime_get_noresume(ndev->dev.parent);
2753 err = pm_runtime_set_active(ndev->dev.parent);
2755 goto out_runtime_put_noidle;
2756 pm_runtime_enable(ndev->dev.parent);
2758 mcp251xfd_register_quirks(priv);
2760 err = mcp251xfd_chip_softreset(priv);
2762 goto out_runtime_disable;
2764 goto out_chip_set_mode_sleep;
2766 err = mcp251xfd_register_chip_detect(priv);
2768 goto out_chip_set_mode_sleep;
2770 err = mcp251xfd_register_check_rx_int(priv);
2772 goto out_chip_set_mode_sleep;
2774 err = register_candev(ndev);
2776 goto out_chip_set_mode_sleep;
2778 err = mcp251xfd_register_done(priv);
2780 goto out_unregister_candev;
2782 /* Put controller into sleep mode and let pm_runtime_put()
2783 * disable the clocks and vdd. If CONFIG_PM is not enabled,
2784 * the clocks and vdd will stay powered.
2786 err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
2788 goto out_unregister_candev;
2790 pm_runtime_put(ndev->dev.parent);
2794 out_unregister_candev:
2795 unregister_candev(ndev);
2796 out_chip_set_mode_sleep:
2797 mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
2798 out_runtime_disable:
2799 pm_runtime_disable(ndev->dev.parent);
2800 out_runtime_put_noidle:
2801 pm_runtime_put_noidle(ndev->dev.parent);
2802 mcp251xfd_clks_and_vdd_disable(priv);
/* Inverse of mcp251xfd_register(): unregister the candev and power
 * down via runtime PM, clocks and vdd.
 */
2807 static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
2809 struct net_device *ndev = priv->ndev;
2811 unregister_candev(ndev);
2813 pm_runtime_get_sync(ndev->dev.parent);
2814 pm_runtime_put_noidle(ndev->dev.parent);
2815 mcp251xfd_clks_and_vdd_disable(priv);
2816 pm_runtime_disable(ndev->dev.parent);
/* Device tree match table; the generic "microchip,mcp251xfd" entry
 * triggers model autodetection in mcp251xfd_register_chip_detect().
 */
2819 static const struct of_device_id mcp251xfd_of_match[] = {
2821 .compatible = "microchip,mcp2517fd",
2822 .data = &mcp251xfd_devtype_data_mcp2517fd,
2824 .compatible = "microchip,mcp2518fd",
2825 .data = &mcp251xfd_devtype_data_mcp2518fd,
2827 .compatible = "microchip,mcp251xfd",
2828 .data = &mcp251xfd_devtype_data_mcp251xfd,
/* Legacy SPI device ID table, mirroring the OF match table. */
2835 static const struct spi_device_id mcp251xfd_id_table[] = {
2837 .name = "mcp2517fd",
2838 .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
2840 .name = "mcp2518fd",
2841 .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
2843 .name = "mcp251xfd",
2844 .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
2851 static int mcp251xfd_probe(struct spi_device *spi)
2854 struct net_device *ndev;
2855 struct mcp251xfd_priv *priv;
2856 struct gpio_desc *rx_int;
2857 struct regulator *reg_vdd, *reg_xceiver;
2863 return dev_err_probe(&spi->dev, -ENXIO,
2864 "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");
2866 rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
2869 return dev_err_probe(&spi->dev, PTR_ERR(rx_int),
2870 "Failed to get RX-INT!\n");
2872 reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
2873 if (PTR_ERR(reg_vdd) == -ENODEV)
2875 else if (IS_ERR(reg_vdd))
2876 return dev_err_probe(&spi->dev, PTR_ERR(reg_vdd),
2877 "Failed to get VDD regulator!\n");
2879 reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
2880 if (PTR_ERR(reg_xceiver) == -ENODEV)
2882 else if (IS_ERR(reg_xceiver))
2883 return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver),
2884 "Failed to get Transceiver regulator!\n");
2886 clk = devm_clk_get(&spi->dev, NULL);
2888 return dev_err_probe(&spi->dev, PTR_ERR(clk),
2889 "Failed to get Oscillator (clock)!\n");
2890 freq = clk_get_rate(clk);
2893 if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
2894 freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
2896 "Oscillator frequency (%u Hz) is too low or high.\n",
2901 if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
2903 "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
2908 ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
2909 MCP251XFD_TX_OBJ_NUM_MAX);
2913 SET_NETDEV_DEV(ndev, &spi->dev);
2915 ndev->netdev_ops = &mcp251xfd_netdev_ops;
2916 ndev->irq = spi->irq;
2917 ndev->flags |= IFF_ECHO;
2919 priv = netdev_priv(ndev);
2920 spi_set_drvdata(spi, priv);
2921 priv->can.clock.freq = freq;
2922 priv->can.do_set_mode = mcp251xfd_set_mode;
2923 priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
2924 priv->can.bittiming_const = &mcp251xfd_bittiming_const;
2925 priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
2926 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
2927 CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
2928 CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
2929 CAN_CTRLMODE_CC_LEN8_DLC;
2932 priv->rx_int = rx_int;
2934 priv->reg_vdd = reg_vdd;
2935 priv->reg_xceiver = reg_xceiver;
2937 match = device_get_match_data(&spi->dev);
2939 priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
2941 priv->devtype_data = *(struct mcp251xfd_devtype_data *)
2942 spi_get_device_id(spi)->driver_data;
2944 /* Errata Reference:
2945 * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
2947 * The SPI can write corrupted data to the RAM at fast SPI
2950 * Simultaneous activity on the CAN bus while writing data to
2951 * RAM via the SPI interface, with high SCK frequency, can
2952 * lead to corrupted data being written to RAM.
2955 * Ensure that FSCK is less than or equal to 0.85 *
2958 * Known good combinations are:
2960 * MCP ext-clk SoC SPI SPI-clk max-clk parent-clk config
2962 * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 8333333 Hz 83.33% 600000000 Hz assigned-clocks = <&ccu CLK_SPIx>
2963 * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 16666667 Hz 83.33% 600000000 Hz assigned-clocks = <&ccu CLK_SPIx>
2964 * 2517 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz default
2965 * 2518 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz default
2966 * 2518 40 MHz fsl,imx6dl fsl,imx51-ecspi 15000000 Hz 75.00% 30000000 Hz default
2967 * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 8333333 Hz 83.33% 16666667 Hz assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
2970 priv->spi_max_speed_hz_orig = spi->max_speed_hz;
2971 spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
2972 spi->bits_per_word = 8;
2974 err = spi_setup(spi);
2976 goto out_free_candev;
2978 err = mcp251xfd_regmap_init(priv);
2980 goto out_free_candev;
2982 err = can_rx_offload_add_manual(ndev, &priv->offload,
2983 MCP251XFD_NAPI_WEIGHT);
2985 goto out_free_candev;
2987 err = mcp251xfd_register(priv);
2989 goto out_free_candev;
2994 spi->max_speed_hz = priv->spi_max_speed_hz_orig;
3001 static int mcp251xfd_remove(struct spi_device *spi)
3003 struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
3004 struct net_device *ndev = priv->ndev;
3006 can_rx_offload_del(&priv->offload);
3007 mcp251xfd_unregister(priv);
3008 spi->max_speed_hz = priv->spi_max_speed_hz_orig;
3014 static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
3016 const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
3018 return mcp251xfd_clks_and_vdd_disable(priv);
3021 static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
3023 const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
3025 return mcp251xfd_clks_and_vdd_enable(priv);
3028 static const struct dev_pm_ops mcp251xfd_pm_ops = {
3029 SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
3030 mcp251xfd_runtime_resume, NULL)
3033 static struct spi_driver mcp251xfd_driver = {
3035 .name = DEVICE_NAME,
3036 .pm = &mcp251xfd_pm_ops,
3037 .of_match_table = mcp251xfd_of_match,
3039 .probe = mcp251xfd_probe,
3040 .remove = mcp251xfd_remove,
3041 .id_table = mcp251xfd_id_table,
3043 module_spi_driver(mcp251xfd_driver);
3045 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
3046 MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
3047 MODULE_LICENSE("GPL v2");