// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (C) IBM Corporation 2020

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/fsi.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

#define FSI_ENGID_SPI			0x23
#define FSI_MBOX_ROOT_CTRL_8		0x2860
#define  FSI_MBOX_ROOT_CTRL_8_SPI_MUX	 0xf0000000

#define FSI2SPI_DATA0			0x00
#define FSI2SPI_DATA1			0x04
#define FSI2SPI_CMD			0x08
#define  FSI2SPI_CMD_WRITE		 BIT(31)
#define FSI2SPI_RESET			0x18
#define FSI2SPI_STATUS			0x1c
#define  FSI2SPI_STATUS_ANY_ERROR	 BIT(31)
#define FSI2SPI_IRQ			0x20

#define SPI_FSI_BASE			0x70000
#define SPI_FSI_INIT_TIMEOUT_MS		1000
#define SPI_FSI_MAX_XFR_SIZE		2048
#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED	32

#define SPI_FSI_ERROR			0x0
#define SPI_FSI_COUNTER_CFG		0x1
#define  SPI_FSI_COUNTER_CFG_LOOPS(x)	 (((u64)(x) & 0xffULL) << 32)
#define  SPI_FSI_COUNTER_CFG_N2_RX	 BIT_ULL(8)
#define  SPI_FSI_COUNTER_CFG_N2_TX	 BIT_ULL(9)
#define  SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
#define  SPI_FSI_COUNTER_CFG_N2_RELOAD	 BIT_ULL(11)
#define SPI_FSI_CFG1			0x2
#define SPI_FSI_CLOCK_CFG		0x3
#define  SPI_FSI_CLOCK_CFG_MM_ENABLE	 BIT_ULL(32)
#define  SPI_FSI_CLOCK_CFG_ECC_DISABLE	 (BIT_ULL(35) | BIT_ULL(33))
#define  SPI_FSI_CLOCK_CFG_RESET1	 (BIT_ULL(36) | BIT_ULL(38))
#define  SPI_FSI_CLOCK_CFG_RESET2	 (BIT_ULL(37) | BIT_ULL(39))
#define  SPI_FSI_CLOCK_CFG_MODE		 (BIT_ULL(41) | BIT_ULL(42))
#define  SPI_FSI_CLOCK_CFG_SCK_RECV_DEL	 GENMASK_ULL(51, 44)
#define   SPI_FSI_CLOCK_CFG_SCK_NO_DEL	  BIT_ULL(51)
#define  SPI_FSI_CLOCK_CFG_SCK_DIV	 GENMASK_ULL(63, 52)
#define SPI_FSI_MMAP			0x4
#define SPI_FSI_DATA_TX			0x5
#define SPI_FSI_DATA_RX			0x6
#define SPI_FSI_SEQUENCE		0x7
#define  SPI_FSI_SEQUENCE_STOP		 0x00
#define  SPI_FSI_SEQUENCE_SEL_SLAVE(x)	 (0x10 | ((x) & 0xf))
#define  SPI_FSI_SEQUENCE_SHIFT_OUT(x)	 (0x30 | ((x) & 0xf))
#define  SPI_FSI_SEQUENCE_SHIFT_IN(x)	 (0x40 | ((x) & 0xf))
#define  SPI_FSI_SEQUENCE_COPY_DATA_TX	 0xc0
#define  SPI_FSI_SEQUENCE_BRANCH(x)	 (0xe0 | ((x) & 0xf))
#define SPI_FSI_STATUS			0x8
#define  SPI_FSI_STATUS_ERROR		 \
	(GENMASK_ULL(31, 21) | GENMASK_ULL(15, 12))
#define  SPI_FSI_STATUS_SEQ_STATE	 GENMASK_ULL(55, 48)
#define   SPI_FSI_STATUS_SEQ_STATE_IDLE	  BIT_ULL(48)
#define  SPI_FSI_STATUS_TDR_UNDERRUN	 BIT_ULL(57)
#define  SPI_FSI_STATUS_TDR_OVERRUN	 BIT_ULL(58)
#define  SPI_FSI_STATUS_TDR_FULL	 BIT_ULL(59)
#define  SPI_FSI_STATUS_RDR_UNDERRUN	 BIT_ULL(61)
#define  SPI_FSI_STATUS_RDR_OVERRUN	 BIT_ULL(62)
#define  SPI_FSI_STATUS_RDR_FULL	 BIT_ULL(63)
#define  SPI_FSI_STATUS_ANY_ERROR	 \
	(SPI_FSI_STATUS_ERROR | \
	 SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
	 SPI_FSI_STATUS_RDR_OVERRUN)
#define SPI_FSI_PORT_CTRL		0x9

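/*
 * The SPI controller registers are 64 bits wide but are reached indirectly
 * through the FSI2SPI engine: the target register offset is written to
 * FSI2SPI_CMD and the upper and lower 32-bit halves of the value move
 * through FSI2SPI_DATA0 and FSI2SPI_DATA1 as big-endian words.
 * fsi_spi_read_reg() and fsi_spi_write_reg() below wrap this access pattern.
 */
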
struct fsi_spi {
	struct device *dev;	/* SPI controller device */
	struct fsi_device *fsi;	/* FSI2SPI CFAM engine device */
	u32 base;
	size_t max_xfr_size;
	bool restricted;
};

struct fsi_spi_sequence {
	int bit;
	u64 data;
};

static int fsi_spi_check_mux(struct fsi_device *fsi, struct device *dev)
{
	int rc;
	u32 root_ctrl_8;
	__be32 root_ctrl_8_be;

	rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8_be,
			    sizeof(root_ctrl_8_be));
	if (rc)
		return rc;

	root_ctrl_8 = be32_to_cpu(root_ctrl_8_be);
	dev_dbg(dev, "Root control register 8: %08x\n", root_ctrl_8);
	if ((root_ctrl_8 & FSI_MBOX_ROOT_CTRL_8_SPI_MUX) ==
	    FSI_MBOX_ROOT_CTRL_8_SPI_MUX)
		return 0;

	return -ENOLINK;
}

static int fsi_spi_check_status(struct fsi_spi *ctx)
{
	int rc;
	u32 sts;
	__be32 sts_be;

	rc = fsi_device_read(ctx->fsi, FSI2SPI_STATUS, &sts_be,
			     sizeof(sts_be));
	if (rc)
		return rc;

	sts = be32_to_cpu(sts_be);
	if (sts & FSI2SPI_STATUS_ANY_ERROR) {
		dev_err(ctx->dev, "Error with FSI2SPI interface: %08x.\n", sts);
		return -EIO;
	}

	return 0;
}

static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value)
{
	int rc;
	__be32 cmd_be;
	__be32 data_be;
	u32 cmd = offset + ctx->base;

	*value = 0ULL;

	if (cmd & FSI2SPI_CMD_WRITE)
		return -EINVAL;

	cmd_be = cpu_to_be32(cmd);
	rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
	if (rc)
		return rc;

	rc = fsi_spi_check_status(ctx);
	if (rc)
		return rc;

	rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA0, &data_be,
			     sizeof(data_be));
	if (rc)
		return rc;

	*value |= (u64)be32_to_cpu(data_be) << 32;

	rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA1, &data_be,
			     sizeof(data_be));
	if (rc)
		return rc;

	*value |= (u64)be32_to_cpu(data_be);
	dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value);

	return 0;
}

static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value)
{
	int rc;
	__be32 cmd_be;
	__be32 data_be;
	u32 cmd = offset + ctx->base;

	if (cmd & FSI2SPI_CMD_WRITE)
		return -EINVAL;

	dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value);

	data_be = cpu_to_be32(upper_32_bits(value));
	rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA0, &data_be,
			      sizeof(data_be));
	if (rc)
		return rc;

	data_be = cpu_to_be32(lower_32_bits(value));
	rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA1, &data_be,
			      sizeof(data_be));
	if (rc)
		return rc;

	cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE);
	rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
	if (rc)
		return rc;

	return fsi_spi_check_status(ctx);
}

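/*
 * Data moves through the shift register up to eight bytes at a time.
 * fsi_spi_data_out() packs tx bytes into the 64-bit register value starting
 * at the most significant byte (note that the byte-pointer packing assumes a
 * little-endian host); fsi_spi_data_in() unpacks rx bytes from the least
 * significant end. Both return the number of bytes handled.
 */
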
static int fsi_spi_data_in(u64 in, u8 *rx, int len)
{
	int i;
	int num_bytes = min(len, 8);

	for (i = 0; i < num_bytes; ++i)
		rx[i] = (u8)(in >> (8 * ((num_bytes - 1) - i)));

	return num_bytes;
}

static int fsi_spi_data_out(u64 *out, const u8 *tx, int len)
{
	int i;
	int num_bytes = min(len, 8);
	u8 *out_bytes = (u8 *)out;

	/* Unused bytes of the tx data should be 0. */
	*out = 0ULL;

	for (i = 0; i < num_bytes; ++i)
		out_bytes[8 - (i + 1)] = tx[i];

	return num_bytes;
}

static int fsi_spi_reset(struct fsi_spi *ctx)
{
	int rc;

	dev_dbg(ctx->dev, "Resetting SPI controller.\n");

	rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
			       SPI_FSI_CLOCK_CFG_RESET1);
	if (rc)
		return rc;

	rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
			       SPI_FSI_CLOCK_CFG_RESET2);
	if (rc)
		return rc;

	return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
}

static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
{
	/*
	 * Add the next byte of instruction to the 8-byte sequence register.
	 * Then decrement the counter so that the next instruction will go in
	 * the right place. Return the index of the slot we just filled in the
	 * sequence register.
	 */
	seq->data |= (u64)val << seq->bit;
	seq->bit -= 8;

	return ((64 - seq->bit) / 8) - 2;
}

static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
{
	seq->bit = 56;
	seq->data = 0ULL;
}

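/*
 * Build the sequencer program for one transfer. Each chunk of up to eight
 * bytes becomes one shift-out or shift-in operation; when more than one full
 * chunk is needed, a branch instruction plus the loop counter register repeat
 * the operation, and any remainder gets its own final operation. Restricted
 * controllers avoid the branch by filling spare sequencer slots instead and
 * give up if the transfer still doesn't fit.
 */
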
static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
				     struct fsi_spi_sequence *seq,
				     struct spi_transfer *transfer)
{
	int loops;
	int idx;
	int rc;
	u8 val = 0;
	u8 len = min(transfer->len, 8U);
	u8 rem = transfer->len % len;
	u64 cfg = 0ULL;

	loops = transfer->len / len;

	if (transfer->tx_buf) {
		val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
		idx = fsi_spi_sequence_add(seq, val);
		if (rem)
			rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
	} else if (transfer->rx_buf) {
		val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
		idx = fsi_spi_sequence_add(seq, val);
		if (rem)
			rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
	} else {
		return -EINVAL;
	}

	if (ctx->restricted) {
		const int eidx = rem ? 5 : 6;

		/* Use spare sequencer slots instead of the branch. */
		while (loops > 1 && idx <= eidx) {
			idx = fsi_spi_sequence_add(seq, val);
			loops--;
			if (rem)
				rem = 0;
		}

		if (loops > 1) {
			dev_warn(ctx->dev, "No sequencer slots; aborting.\n");
			return -EINVAL;
		}
	}

	if (loops > 1) {
		fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));

		cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
		if (transfer->rx_buf)
			cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
			       SPI_FSI_COUNTER_CFG_N2_TX |
			       SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
			       SPI_FSI_COUNTER_CFG_N2_RELOAD;

		rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
		if (rc)
			return rc;
	} else {
		fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
	}

	if (rem)
		fsi_spi_sequence_add(seq, rem);

	return 0;
}

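/*
 * Move the data for one transfer. For tx, each chunk is written to the
 * transmit data register and the status register is polled until the TDR is
 * no longer full. For rx, the status register is polled until the RDR is
 * full, then the receive data register is read; a sequence configured with
 * SPI_FSI_COUNTER_CFG_N2_IMPLICIT first gets a dummy write to the transmit
 * register. Any error status resets the controller and fails the transfer.
 */
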
static int fsi_spi_transfer_data(struct fsi_spi *ctx,
				 struct spi_transfer *transfer)
{
	int rc = 0;
	u64 status = 0ULL;
	u64 cfg = 0ULL;

	if (transfer->tx_buf) {
		int nb;
		int sent = 0;
		u64 out = 0ULL;
		const u8 *tx = transfer->tx_buf;

		while (transfer->len > sent) {
			nb = fsi_spi_data_out(&out, &tx[sent],
					      (int)transfer->len - sent);

			rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, out);
			if (rc)
				return rc;

			do {
				rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
						      &status);
				if (rc)
					return rc;

				if (status & SPI_FSI_STATUS_ANY_ERROR) {
					rc = fsi_spi_reset(ctx);
					if (rc)
						return rc;

					return -EREMOTEIO;
				}
			} while (status & SPI_FSI_STATUS_TDR_FULL);

			sent += nb;
		}
	} else if (transfer->rx_buf) {
		int recv = 0;
		u64 in = 0ULL;
		u8 *rx = transfer->rx_buf;

		rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
		if (rc)
			return rc;

		if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
			rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
			if (rc)
				return rc;
		}

		while (transfer->len > recv) {
			do {
				rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
						      &status);
				if (rc)
					return rc;

				if (status & SPI_FSI_STATUS_ANY_ERROR) {
					rc = fsi_spi_reset(ctx);
					if (rc)
						return rc;

					return -EREMOTEIO;
				}
			} while (!(status & SPI_FSI_STATUS_RDR_FULL));

			rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
			if (rc)
				return rc;

			recv += fsi_spi_data_in(in, &rx[recv],
						(int)transfer->len - recv);
		}
	}

	return 0;
}

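/*
 * Get the controller into a known state before programming a sequence: wait
 * up to SPI_FSI_INIT_TIMEOUT_MS for the sequencer to go idle, resetting it
 * at most once if an error or leftover TDR/RDR data is seen, then rewrite
 * the clock configuration if it doesn't already match the wanted value.
 */
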
static int fsi_spi_transfer_init(struct fsi_spi *ctx)
{
	int rc;
	bool reset = false;
	unsigned long end;
	u64 seq_state;
	u64 clock_cfg = 0ULL;
	u64 status = 0ULL;
	u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
		SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
		FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);

	end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
	do {
		if (time_after(jiffies, end))
			return -ETIMEDOUT;

		rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status);
		if (rc)
			return rc;

		seq_state = status & SPI_FSI_STATUS_SEQ_STATE;

		if (status & (SPI_FSI_STATUS_ANY_ERROR |
			      SPI_FSI_STATUS_TDR_FULL |
			      SPI_FSI_STATUS_RDR_FULL)) {
			if (reset)
				return -EIO;

			rc = fsi_spi_reset(ctx);
			if (rc)
				return rc;

			reset = true;
			continue;
		}
	} while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));

	rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
	if (rc)
		return rc;

	if ((clock_cfg & (SPI_FSI_CLOCK_CFG_MM_ENABLE |
			  SPI_FSI_CLOCK_CFG_ECC_DISABLE |
			  SPI_FSI_CLOCK_CFG_MODE |
			  SPI_FSI_CLOCK_CFG_SCK_RECV_DEL |
			  SPI_FSI_CLOCK_CFG_SCK_DIV)) != wanted_clock_cfg)
		rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
				       wanted_clock_cfg);

	return rc;
}

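/*
 * Handle one SPI message. The controller is half duplex and the sequencer
 * must shift out before it can shift in, so every transfer needs a tx
 * buffer; an rx transfer is only handled as the immediate follow-on to the
 * preceding tx transfer and is folded into the same sequence. The slave is
 * selected at the start of each sequence and deselected again at the end.
 */
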
static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
					struct spi_message *mesg)
{
	int rc;
	u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
	struct spi_transfer *transfer;
	struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);

	rc = fsi_spi_check_mux(ctx->fsi, ctx->dev);
	if (rc)
		goto error;

	list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
		struct fsi_spi_sequence seq;
		struct spi_transfer *next = NULL;

		/* Sequencer must do shift out (tx) first. */
		if (!transfer->tx_buf ||
		    transfer->len > (ctx->max_xfr_size + 8)) {
			rc = -EINVAL;
			goto error;
		}

		dev_dbg(ctx->dev, "Start tx of %d bytes.\n", transfer->len);

		rc = fsi_spi_transfer_init(ctx);
		if (rc < 0)
			goto error;

		fsi_spi_sequence_init(&seq);
		fsi_spi_sequence_add(&seq, seq_slave);

		rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
		if (rc)
			goto error;

		if (!list_is_last(&transfer->transfer_list,
				  &mesg->transfers)) {
			next = list_next_entry(transfer, transfer_list);

			/* Sequencer can only do shift in (rx) after tx. */
			if (next->rx_buf) {
				if (next->len > ctx->max_xfr_size) {
					rc = -EINVAL;
					goto error;
				}

				dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
					next->len);

				rc = fsi_spi_sequence_transfer(ctx, &seq,
							       next);
				if (rc)
					goto error;
			} else {
				next = NULL;
			}
		}

		fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SEL_SLAVE(0));

		rc = fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, seq.data);
		if (rc)
			goto error;

		rc = fsi_spi_transfer_data(ctx, transfer);
		if (rc)
			goto error;

		if (next) {
			rc = fsi_spi_transfer_data(ctx, next);
			if (rc)
				goto error;

			transfer = next;
		}
	}

error:
	mesg->status = rc;
	spi_finalize_current_message(ctlr);

	return rc;
}

static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
{
	struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);

	return ctx->max_xfr_size;
}

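/*
 * One FSI2SPI engine fans out to several SPI controllers, each described by
 * a child node of the engine's device tree node. Register one SPI controller
 * per child with a "reg" property; children compatible with
 * "ibm,fsi2spi-restricted" get the smaller transfer limit and the restricted
 * sequencer handling. Probing is refused entirely if the SPI mux is not set,
 * since none of the controllers could reach their slaves.
 */
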
static int fsi_spi_probe(struct device *dev)
{
	int rc;
	struct device_node *np;
	int num_controllers_registered = 0;
	struct fsi_device *fsi = to_fsi_dev(dev);

	rc = fsi_spi_check_mux(fsi, dev);
	if (rc)
		return -ENODEV;

	for_each_available_child_of_node(dev->of_node, np) {
		u32 base;
		struct fsi_spi *ctx;
		struct spi_controller *ctlr;

		if (of_property_read_u32(np, "reg", &base))
			continue;

		ctlr = spi_alloc_master(dev, sizeof(*ctx));
		if (!ctlr) {
			of_node_put(np);
			break;
		}

		ctlr->dev.of_node = np;
		ctlr->num_chipselect = of_get_available_child_count(np) ?: 1;
		ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
		ctlr->max_transfer_size = fsi_spi_max_transfer_size;
		ctlr->transfer_one_message = fsi_spi_transfer_one_message;

		ctx = spi_controller_get_devdata(ctlr);
		ctx->dev = &ctlr->dev;
		ctx->fsi = fsi;
		ctx->base = base + SPI_FSI_BASE;

		if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
			ctx->restricted = true;
			ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
		} else {
			ctx->restricted = false;
			ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
		}

		rc = devm_spi_register_controller(dev, ctlr);
		if (rc)
			spi_controller_put(ctlr);
		else
			num_controllers_registered++;
	}

	if (!num_controllers_registered)
		return -ENODEV;

	return 0;
}

static const struct fsi_device_id fsi_spi_ids[] = {
	{ FSI_ENGID_SPI, FSI_VERSION_ANY },
	{ }
};
MODULE_DEVICE_TABLE(fsi, fsi_spi_ids);

static struct fsi_driver fsi_spi_driver = {
	.id_table = fsi_spi_ids,
	.drv = {
		.name = "spi-fsi",
		.bus = &fsi_bus_type,
		.probe = fsi_spi_probe,
	},
};

module_fsi_driver(fsi_spi_driver);

MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
MODULE_DESCRIPTION("FSI attached SPI controller");
MODULE_LICENSE("GPL");