// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Cadence Design Systems Inc.
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
8 #include <linux/bitops.h>
10 #include <linux/err.h>
11 #include <linux/errno.h>
12 #include <linux/i3c/master.h>
13 #include <linux/interrupt.h>
15 #include <linux/iopoll.h>
16 #include <linux/ioport.h>
17 #include <linux/kernel.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/workqueue.h>
27 #define DEV_ID_I3C_MASTER 0x5034
29 #define CONF_STATUS0 0x4
30 #define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
31 #define CONF_STATUS0_ECC_CHK BIT(28)
32 #define CONF_STATUS0_INTEG_CHK BIT(27)
33 #define CONF_STATUS0_CSR_DAP_CHK BIT(26)
34 #define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
35 #define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
36 #define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
37 #define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
/*
 * IBI response FIFO depth: 2-bit field at bits [7:6], depth = 4 << field.
 * The field masked with GENMASK(7, 6) must be shifted right by its LSB
 * position (6), not 7 — consistent with every other extractor in this file.
 */
#define CONF_STATUS0_IBIR_DEPTH(x)	(4 << (((x) & GENMASK(7, 6)) >> 6))
39 #define CONF_STATUS0_SUPPORTS_DDR BIT(5)
40 #define CONF_STATUS0_SEC_MASTER BIT(4)
41 #define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))
43 #define CONF_STATUS1 0x8
44 #define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
45 #define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
46 #define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
47 #define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
48 #define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
49 #define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
50 #define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))
53 #define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
54 #define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
55 #define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
56 #define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))
59 #define CTRL_DEV_EN BIT(31)
60 #define CTRL_HALT_EN BIT(30)
61 #define CTRL_MCS BIT(29)
62 #define CTRL_MCS_EN BIT(28)
63 #define CTRL_HJ_DISEC BIT(8)
64 #define CTRL_MST_ACK BIT(7)
65 #define CTRL_HJ_ACK BIT(6)
66 #define CTRL_HJ_INIT BIT(5)
67 #define CTRL_MST_INIT BIT(4)
68 #define CTRL_AHDR_OPT BIT(3)
69 #define CTRL_PURE_BUS_MODE 0
70 #define CTRL_MIXED_FAST_BUS_MODE 2
71 #define CTRL_MIXED_SLOW_BUS_MODE 3
72 #define CTRL_BUS_MODE_MASK GENMASK(1, 0)
74 #define PRESCL_CTRL0 0x14
75 #define PRESCL_CTRL0_I2C(x) ((x) << 16)
76 #define PRESCL_CTRL0_I3C(x) (x)
77 #define PRESCL_CTRL0_MAX GENMASK(9, 0)
79 #define PRESCL_CTRL1 0x18
80 #define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
81 #define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
82 #define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
83 #define PRESCL_CTRL1_OD_LOW(x) (x)
90 #define MST_INT_HALTED BIT(18)
91 #define MST_INT_MR_DONE BIT(17)
92 #define MST_INT_IMM_COMP BIT(16)
93 #define MST_INT_TX_THR BIT(15)
94 #define MST_INT_TX_OVF BIT(14)
95 #define MST_INT_IBID_THR BIT(12)
96 #define MST_INT_IBID_UNF BIT(11)
97 #define MST_INT_IBIR_THR BIT(10)
98 #define MST_INT_IBIR_UNF BIT(9)
99 #define MST_INT_IBIR_OVF BIT(8)
100 #define MST_INT_RX_THR BIT(7)
101 #define MST_INT_RX_UNF BIT(6)
102 #define MST_INT_CMDD_EMP BIT(5)
103 #define MST_INT_CMDD_THR BIT(4)
104 #define MST_INT_CMDD_OVF BIT(3)
105 #define MST_INT_CMDR_THR BIT(2)
106 #define MST_INT_CMDR_UNF BIT(1)
107 #define MST_INT_CMDR_OVF BIT(0)
109 #define MST_STATUS0 0x34
110 #define MST_STATUS0_IDLE BIT(18)
111 #define MST_STATUS0_HALTED BIT(17)
112 #define MST_STATUS0_MASTER_MODE BIT(16)
113 #define MST_STATUS0_TX_FULL BIT(13)
114 #define MST_STATUS0_IBID_FULL BIT(12)
115 #define MST_STATUS0_IBIR_FULL BIT(11)
116 #define MST_STATUS0_RX_FULL BIT(10)
117 #define MST_STATUS0_CMDD_FULL BIT(9)
118 #define MST_STATUS0_CMDR_FULL BIT(8)
119 #define MST_STATUS0_TX_EMP BIT(5)
120 #define MST_STATUS0_IBID_EMP BIT(4)
121 #define MST_STATUS0_IBIR_EMP BIT(3)
122 #define MST_STATUS0_RX_EMP BIT(2)
123 #define MST_STATUS0_CMDD_EMP BIT(1)
124 #define MST_STATUS0_CMDR_EMP BIT(0)
127 #define CMDR_NO_ERROR 0
128 #define CMDR_DDR_PREAMBLE_ERROR 1
129 #define CMDR_DDR_PARITY_ERROR 2
130 #define CMDR_DDR_RX_FIFO_OVF 3
131 #define CMDR_DDR_TX_FIFO_UNF 4
132 #define CMDR_M0_ERROR 5
133 #define CMDR_M1_ERROR 6
134 #define CMDR_M2_ERROR 7
135 #define CMDR_MST_ABORT 8
136 #define CMDR_NACK_RESP 9
137 #define CMDR_INVALID_DA 10
138 #define CMDR_DDR_DROPPED 11
139 #define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
140 #define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
141 #define CMDR_CMDID_HJACK_DISEC 0xfe
142 #define CMDR_CMDID_HJACK_ENTDAA 0xff
143 #define CMDR_CMDID(x) ((x) & GENMASK(7, 0))
146 #define IBIR_ACKED BIT(12)
147 #define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
148 #define IBIR_ERROR BIT(7)
149 #define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
150 #define IBIR_TYPE_IBI 0
151 #define IBIR_TYPE_HJ 1
152 #define IBIR_TYPE_MR 2
153 #define IBIR_TYPE(x) ((x) & GENMASK(1, 0))
160 #define SLV_INT_TM BIT(20)
161 #define SLV_INT_ERROR BIT(19)
162 #define SLV_INT_EVENT_UP BIT(18)
163 #define SLV_INT_HJ_DONE BIT(17)
164 #define SLV_INT_MR_DONE BIT(16)
165 #define SLV_INT_DA_UPD BIT(15)
166 #define SLV_INT_SDR_FAIL BIT(14)
167 #define SLV_INT_DDR_FAIL BIT(13)
168 #define SLV_INT_M_RD_ABORT BIT(12)
169 #define SLV_INT_DDR_RX_THR BIT(11)
170 #define SLV_INT_DDR_TX_THR BIT(10)
171 #define SLV_INT_SDR_RX_THR BIT(9)
172 #define SLV_INT_SDR_TX_THR BIT(8)
173 #define SLV_INT_DDR_RX_UNF BIT(7)
174 #define SLV_INT_DDR_TX_OVF BIT(6)
175 #define SLV_INT_SDR_RX_UNF BIT(5)
176 #define SLV_INT_SDR_TX_OVF BIT(4)
177 #define SLV_INT_DDR_RD_COMP BIT(3)
178 #define SLV_INT_DDR_WR_COMP BIT(2)
179 #define SLV_INT_SDR_RD_COMP BIT(1)
180 #define SLV_INT_SDR_WR_COMP BIT(0)
182 #define SLV_STATUS0 0x54
183 #define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
184 #define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))
186 #define SLV_STATUS1 0x58
187 #define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
188 #define SLV_STATUS1_VEN_TM BIT(19)
189 #define SLV_STATUS1_HJ_DIS BIT(18)
190 #define SLV_STATUS1_MR_DIS BIT(17)
191 #define SLV_STATUS1_PROT_ERR BIT(16)
/*
 * Dynamic address assigned to the slave: 7-bit field at bits [15:9].
 * The parameter was named 'x' while the body referenced 's', so the macro
 * never used its argument and silently captured any variable named 's'
 * at the expansion site; name the parameter 's' like the sibling
 * SLV_STATUS1_AS() macro.
 */
#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
193 #define SLV_STATUS1_HAS_DA BIT(8)
194 #define SLV_STATUS1_DDR_RX_FULL BIT(7)
195 #define SLV_STATUS1_DDR_TX_FULL BIT(6)
196 #define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
197 #define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
198 #define SLV_STATUS1_SDR_RX_FULL BIT(3)
199 #define SLV_STATUS1_SDR_TX_FULL BIT(2)
200 #define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
201 #define SLV_STATUS1_SDR_TX_EMPTY BIT(0)
203 #define CMD0_FIFO 0x60
204 #define CMD0_FIFO_IS_DDR BIT(31)
205 #define CMD0_FIFO_IS_CCC BIT(30)
206 #define CMD0_FIFO_BCH BIT(29)
207 #define XMIT_BURST_STATIC_SUBADDR 0
208 #define XMIT_SINGLE_INC_SUBADDR 1
209 #define XMIT_SINGLE_STATIC_SUBADDR 2
210 #define XMIT_BURST_WITHOUT_SUBADDR 3
211 #define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
212 #define CMD0_FIFO_SBCA BIT(26)
213 #define CMD0_FIFO_RSBC BIT(25)
214 #define CMD0_FIFO_IS_10B BIT(24)
215 #define CMD0_FIFO_PL_LEN(l) ((l) << 12)
216 #define CMD0_FIFO_PL_LEN_MAX 4095
217 #define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
218 #define CMD0_FIFO_RNW BIT(0)
220 #define CMD1_FIFO 0x64
221 #define CMD1_FIFO_CMDID(id) ((id) << 24)
222 #define CMD1_FIFO_CSRADDR(a) (a)
223 #define CMD1_FIFO_CCC(id) (id)
227 #define IMD_CMD0 0x70
228 #define IMD_CMD0_PL_LEN(l) ((l) << 12)
229 #define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
230 #define IMD_CMD0_RNW BIT(0)
232 #define IMD_CMD1 0x74
233 #define IMD_CMD1_CCC(id) (id)
235 #define IMD_DATA 0x78
237 #define IBI_DATA_FIFO 0x84
238 #define SLV_DDR_TX_FIFO 0x88
239 #define SLV_DDR_RX_FIFO 0x8c
241 #define CMD_IBI_THR_CTRL 0x90
242 #define IBIR_THR(t) ((t) << 24)
243 #define CMDR_THR(t) ((t) << 16)
244 #define IBI_THR(t) ((t) << 8)
245 #define CMD_THR(t) (t)
247 #define TX_RX_THR_CTRL 0x94
248 #define RX_THR(t) ((t) << 16)
249 #define TX_THR(t) (t)
251 #define SLV_DDR_TX_RX_THR_CTRL 0x98
252 #define SLV_DDR_RX_THR(t) ((t) << 16)
253 #define SLV_DDR_TX_THR(t) (t)
255 #define FLUSH_CTRL 0x9c
256 #define FLUSH_IBI_RESP BIT(23)
257 #define FLUSH_CMD_RESP BIT(22)
258 #define FLUSH_SLV_DDR_RX_FIFO BIT(22)
259 #define FLUSH_SLV_DDR_TX_FIFO BIT(21)
260 #define FLUSH_IMM_FIFO BIT(20)
261 #define FLUSH_IBI_FIFO BIT(19)
262 #define FLUSH_RX_FIFO BIT(18)
263 #define FLUSH_TX_FIFO BIT(17)
264 #define FLUSH_CMD_FIFO BIT(16)
266 #define TTO_PRESCL_CTRL0 0xb0
267 #define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
268 #define TTO_PRESCL_CTRL0_DIVA(x) (x)
270 #define TTO_PRESCL_CTRL1 0xb4
271 #define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
272 #define TTO_PRESCL_CTRL1_DIVA(x) (x)
274 #define DEVS_CTRL 0xb8
275 #define DEVS_CTRL_DEV_CLR_SHIFT 16
276 #define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
277 #define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
278 #define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
279 #define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
282 #define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
283 #define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
284 #define DEV_ID_RR0_HDR_CAP BIT(10)
285 #define DEV_ID_RR0_IS_I3C BIT(9)
286 #define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
287 #define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
288 (((a) & GENMASK(9, 7)) << 6))
289 #define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
290 (((x) >> 6) & GENMASK(9, 7)))
292 #define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
293 #define DEV_ID_RR1_PID_MSB(pid) (pid)
295 #define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
296 #define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
297 #define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
298 #define DEV_ID_RR2_DCR(dcr) (dcr)
299 #define DEV_ID_RR2_LVR(lvr) (lvr)
301 #define SIR_MAP(x) (0x180 + ((x) * 4))
302 #define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
303 #define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
304 #define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
305 #define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
306 #define DEV_ROLE_SLAVE 0
307 #define DEV_ROLE_MASTER 1
308 #define SIR_MAP_DEV_ROLE(role) ((role) << 14)
309 #define SIR_MAP_DEV_SLOW BIT(13)
310 #define SIR_MAP_DEV_PL(l) ((l) << 8)
311 #define SIR_MAP_PL_MAX GENMASK(4, 0)
312 #define SIR_MAP_DEV_DA(a) ((a) << 1)
313 #define SIR_MAP_DEV_ACK BIT(0)
315 #define GPIR_WORD(x) (0x200 + ((x) * 4))
316 #define GPI_REG(val, id) \
317 (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
319 #define GPOR_WORD(x) (0x220 + ((x) * 4))
320 #define GPO_REG(val, id) \
321 (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
323 #define ASF_INT_STATUS 0x300
324 #define ASF_INT_RAW_STATUS 0x304
325 #define ASF_INT_MASK 0x308
326 #define ASF_INT_TEST 0x30c
327 #define ASF_INT_FATAL_SELECT 0x310
328 #define ASF_INTEGRITY_ERR BIT(6)
329 #define ASF_PROTOCOL_ERR BIT(5)
330 #define ASF_TRANS_TIMEOUT_ERR BIT(4)
331 #define ASF_CSR_ERR BIT(3)
332 #define ASF_DAP_ERR BIT(2)
333 #define ASF_SRAM_UNCORR_ERR BIT(1)
334 #define ASF_SRAM_CORR_ERR BIT(0)
336 #define ASF_SRAM_CORR_FAULT_STATUS 0x320
337 #define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
338 #define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
339 #define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))
341 #define ASF_SRAM_FAULT_STATS 0x328
342 #define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
343 #define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))
345 #define ASF_TRANS_TOUT_CTRL 0x330
346 #define ASF_TRANS_TOUT_EN BIT(31)
347 #define ASF_TRANS_TOUT_VAL(x) (x)
349 #define ASF_TRANS_TOUT_FAULT_MASK 0x334
350 #define ASF_TRANS_TOUT_FAULT_STATUS 0x338
351 #define ASF_TRANS_TOUT_FAULT_APB BIT(3)
352 #define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
353 #define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
354 #define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
356 #define ASF_PROTO_FAULT_MASK 0x340
357 #define ASF_PROTO_FAULT_STATUS 0x344
358 #define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
359 #define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
360 #define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
361 #define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
362 #define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
363 #define ASF_PROTO_FAULT_M(x) BIT(x)
365 struct cdns_i3c_master_caps {
373 struct cdns_i3c_cmd {
383 struct cdns_i3c_xfer {
384 struct list_head node;
385 struct completion comp;
388 struct cdns_i3c_cmd cmds[0];
/* Per-controller driver state; embeds the generic I3C master controller. */
struct cdns_i3c_master {
	/* Deferred Hot-Join handling (queued from the IBI demux path). */
	struct work_struct hj_work;
	/* Generic master base; container_of() target of to_cdns_i3c_master(). */
	struct i3c_master_controller base;
	/* Number of retaining-register (RR) device slots. */
	unsigned int maxdevs;
	unsigned int num_slots;
	/* Device descriptors indexed by slot id. */
	struct i3c_dev_desc **slots;
	/* Pending transfer list; 'cur' is the transfer on the hardware. */
	struct list_head list;
	struct cdns_i3c_xfer *cur;
	/* FIFO depths/features — presumably read from CONF_STATUS0/1 at probe
	 * time; probe code not visible in this view. */
	struct cdns_i3c_master_caps caps;
	/* Current SCL frequency limit (Hz) derived from devices' max data
	 * speeds; 0 means no limitation. */
	unsigned long i3c_scl_lim;
/* Map the generic controller back to its driver-private container. */
static inline struct cdns_i3c_master *
to_cdns_i3c_master(struct i3c_master_controller *master)
	return container_of(master, struct cdns_i3c_master, base);
/*
 * Push @nbytes bytes from @bytes into the TX FIFO: full 32-bit words
 * first, then the trailing 1-3 bytes packed into a temporary word.
 */
static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
					  const u8 *bytes, int nbytes)
	writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
	/* Copy the leftover (nbytes % 4) bytes and push one final word. */
	memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
	writesl(master->regs + TX_FIFO, &tmp, 1);
/*
 * Pull @nbytes bytes from the RX FIFO into @bytes: full 32-bit words
 * first, then one extra word whose 1-3 useful bytes are copied out.
 */
static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
					    u8 *bytes, int nbytes)
	readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
	/* Drain one more word and keep only the (nbytes % 4) valid bytes. */
	readsl(master->regs + RX_FIFO, &tmp, 1);
	memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
/*
 * ->supports_ccc_cmd() hook: report whether this controller can emit the
 * given CCC.  Only the commands listed below are accepted; anything else
 * is rejected so the core can fall back or fail cleanly.
 */
static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					     const struct i3c_ccc_cmd *cmd)
	/* Broadcast and direct variants of the event/limit CCCs. */
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_DEFSLVS:
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	/* Direct GET CCCs. */
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETACCMST:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
/*
 * Clear CTRL_DEV_EN and poll (every 10us, up to 1s) until the controller
 * reports idle.  Returns 0 on success or the readl_poll_timeout() error.
 */
static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
	writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);
	return readl_poll_timeout(master->regs + MST_STATUS0, status,
				  status & MST_STATUS0_IDLE, 10, 1000000);
494 static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
496 writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
499 static struct cdns_i3c_xfer *
500 cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
502 struct cdns_i3c_xfer *xfer;
504 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
508 INIT_LIST_HEAD(&xfer->node);
510 xfer->ret = -ETIMEDOUT;
/* Release a transfer allocated by cdns_i3c_master_alloc_xfer(). */
static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
{
	kfree(xfer);
}
/*
 * Program the current transfer (xferqueue.cur) into the hardware:
 * all TX payloads are written to the TX FIFO first, then each command's
 * CMD1/CMD0 words are pushed to the command FIFO.  Completion is signalled
 * via the MST_INT_CMDD_EMP interrupt.  Caller holds the xferqueue lock.
 */
static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	/* Ack any stale "command descriptor empty" event before starting. */
	writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
		cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
		/* Tag each command with its index so CMDR entries can be
		 * matched back to xfer->cmds[] in the completion handler. */
		writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
		       master->regs + CMD1_FIFO);
		writel(cmd->cmd0, master->regs + CMD0_FIFO);
	/* Kick the controller and unmask the completion interrupt. */
	writel(readl(master->regs + CTRL) | CTRL_MCS,
	       master->regs + CTRL);
	writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
/*
 * Completion half of a transfer: drain the command-response (CMDR) FIFO,
 * copy RX data and per-command error codes into the transfer, complete
 * the waiter, then start the next queued transfer if there is one.
 * Caller holds the xferqueue lock.
 */
static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	/* Nothing to do unless the command FIFO drained. */
	if (!(isr & MST_INT_CMDD_EMP))
	writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
	/* Pop response descriptors until the CMDR FIFO is empty. */
	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_CMDR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		struct cdns_i3c_cmd *cmd;
		u32 cmdr, rx_len, id;
		cmdr = readl(master->regs + CMDR);
		id = CMDR_CMDID(cmdr);
		/* Skip auto-generated HJ-ack responses and bogus ids. */
		if (id == CMDR_CMDID_HJACK_DISEC ||
		    id == CMDR_CMDID_HJACK_ENTDAA ||
		    WARN_ON(id >= xfer->ncmds))
		cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
		/* Never read more than the caller's buffer can hold. */
		rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
		cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
		cmd->error = CMDR_ERROR(cmdr);
	/* Fold per-command hardware error codes into an overall status. */
	for (i = 0; i < xfer->ncmds; i++) {
		switch (xfer->cmds[i].error) {
		case CMDR_DDR_PREAMBLE_ERROR:
		case CMDR_DDR_PARITY_ERROR:
		case CMDR_DDR_DROPPED:
		case CMDR_DDR_RX_FIFO_OVF:
		case CMDR_DDR_TX_FIFO_UNF:
		case CMDR_INVALID_DA:
	complete(&xfer->comp);
	/* Pull the next pending transfer, if any, and start it. */
	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct cdns_i3c_xfer, node);
		list_del_init(&xfer->node);
	master->xferqueue.cur = xfer;
	cdns_i3c_master_start_xfer_locked(master);
/*
 * Queue @xfer for execution.  If a transfer is already on the hardware
 * the new one is appended to the wait list; otherwise it becomes the
 * current transfer and is started immediately.
 */
static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
				       struct cdns_i3c_xfer *xfer)
	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
		master->xferqueue.cur = xfer;
		cdns_i3c_master_start_xfer_locked(master);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
/*
 * Cancel @xfer (typically after a completion timeout).  If it is the
 * transfer currently on the hardware, the controller is disabled, waited
 * for idle, its FIFOs flushed, then re-enabled; otherwise the transfer is
 * simply unlinked from the wait list.
 */
static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
					 struct cdns_i3c_xfer *xfer)
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur == xfer) {
		writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
		       master->regs + CTRL);
		/* Atomic variant: we are under a spinlock with IRQs off. */
		readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
					  status & MST_STATUS0_IDLE, 10,
		master->xferqueue.cur = NULL;
		/* Drop any half-pushed commands/data from the FIFOs. */
		writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
		       master->regs + FLUSH_CTRL);
		writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
		writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
		       master->regs + CTRL);
		list_del_init(&xfer->node);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
/* Translate a hardware CMDR_ERROR() code into a generic i3c_error_code. */
static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
	switch (cmd->error) {
	/* Codes with no generic equivalent fall through to UNKNOWN. */
	return I3C_ERROR_UNKNOWN;
/*
 * ->send_ccc_cmd() hook: build a single-command transfer carrying the CCC,
 * queue it, wait up to 1s for completion (cancelling on timeout) and
 * propagate the resulting error code into @cmd->err.
 */
static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
					struct i3c_ccc_cmd *cmd)
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_xfer *xfer;
	struct cdns_i3c_cmd *ccmd;
	xfer = cdns_i3c_master_alloc_xfer(master, 1);
	ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
	ccmd->cmd0 = CMD0_FIFO_IS_CCC |
		     CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);
	/* Direct CCCs carry the destination address in CMD0. */
	if (cmd->id & I3C_CCC_DIRECT)
		ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);
		ccmd->cmd0 |= CMD0_FIFO_RNW;
		ccmd->rx_buf = cmd->dests[0].payload.data;
		ccmd->rx_len = cmd->dests[0].payload.len;
		ccmd->tx_buf = cmd->dests[0].payload.data;
		ccmd->tx_len = cmd->dests[0].payload.len;
	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);
	cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
	cdns_i3c_master_free_xfer(xfer);
/*
 * ->priv_xfers() hook: execute a batch of SDR private transfers to @dev.
 * The batch is rejected up front if any payload exceeds the 4095-byte
 * CMD0 limit or if the commands/data would not fit in the FIFOs, then
 * each xfer is encoded into a command slot and the whole batch is queued
 * with a 1s completion timeout.
 */
static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				      struct i3c_priv_xfer *xfers,
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	int txslots = 0, rxslots = 0, i, ret;
	struct cdns_i3c_xfer *cdns_xfer;
	/* Per-transfer payload length limit imposed by CMD0_FIFO_PL_LEN. */
	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
	if (nxfers > master->caps.cmdfifodepth ||
	    nxfers > master->caps.cmdrfifodepth)
	/*
	 * First make sure that all transactions (block of transfers separated
	 * by a STOP marker) fit in the FIFOs.
	 */
	for (i = 0; i < nxfers; i++) {
		rxslots += DIV_ROUND_UP(xfers[i].len, 4);
		txslots += DIV_ROUND_UP(xfers[i].len, 4);
	if (rxslots > master->caps.rxfifodepth ||
	    txslots > master->caps.txfifodepth)
	cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
		u32 pl_len = xfers[i].len;
		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].data.in;
			ccmd->rx_len = xfers[i].len;
			ccmd->tx_buf = xfers[i].data.out;
			ccmd->tx_len = xfers[i].len;
		ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);
			ccmd->cmd0 |= CMD0_FIFO_RSBC;
			ccmd->cmd0 |= CMD0_FIFO_BCH;
	cdns_i3c_master_queue_xfer(master, cdns_xfer);
	if (!wait_for_completion_timeout(&cdns_xfer->comp,
					 msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, cdns_xfer);
	ret = cdns_xfer->ret;
	/* Report the per-transfer error code back to the caller. */
	for (i = 0; i < nxfers; i++)
		xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);
	cdns_i3c_master_free_xfer(cdns_xfer);
/*
 * ->i2c_xfers() hook: execute a batch of legacy I2C messages.  Like the
 * private-xfer path, the batch must fit in the command and data FIFOs and
 * each message payload is limited to CMD0_FIFO_PL_LEN_MAX bytes.
 */
static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				     const struct i2c_msg *xfers, int nxfers)
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned int nrxwords = 0, ntxwords = 0;
	struct cdns_i3c_xfer *xfer;
	if (nxfers > master->caps.cmdfifodepth)
	/* Check payload limits and total FIFO usage (in 32-bit words). */
	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
		if (xfers[i].flags & I2C_M_RD)
			nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
			ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
	if (ntxwords > master->caps.txfifodepth ||
	    nrxwords > master->caps.rxfifodepth)
	xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];
		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
			CMD0_FIFO_PL_LEN(xfers[i].len) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
		/* 10-bit I2C addressing support. */
		if (xfers[i].flags & I2C_M_TEN)
			ccmd->cmd0 |= CMD0_FIFO_IS_10B;
		if (xfers[i].flags & I2C_M_RD) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].buf;
			ccmd->rx_len = xfers[i].len;
			ccmd->tx_buf = xfers[i].buf;
			ccmd->tx_len = xfers[i].len;
	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);
	cdns_i3c_master_free_xfer(xfer);
/* Per-device private data attached via i3c/i2c_dev_set_master_data(). */
struct cdns_i3c_i2c_dev_data {
	/* Pre-allocated IBI slot pool (only used for I3C devices with IBIs). */
	struct i3c_generic_ibi_pool *ibi_pool;
873 static u32 prepare_rr0_dev_address(u32 addr)
875 u32 ret = (addr << 1) & 0xff;
877 /* RR0[7:1] = addr[6:0] */
878 ret |= (addr & GENMASK(6, 0)) << 1;
880 /* RR0[15:13] = addr[9:7] */
881 ret |= (addr & GENMASK(9, 7)) << 6;
883 /* RR0[0] = ~XOR(addr[6:0]) */
884 if (!(hweight8(addr & 0x7f) & 1))
/*
 * Rewrite the device's retaining register (RR0) with its current address:
 * the dynamic address when one is assigned, the static address otherwise.
 */
static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
				    dev->info.static_addr);
	writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
/*
 * Find a retaining-register slot for a device.  One path hands out the
 * first free slot (ffs on the free_rr_slots bitmap); the other scans the
 * active slots for an I3C entry already holding the requested dynamic
 * address.  NOTE(review): the branch selecting between the two paths is
 * not visible in this view — presumably keyed on dyn_addr; confirm.
 */
static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
	if (!master->free_rr_slots)
	/* ffs() is 1-based, slot ids are 0-based. */
	return ffs(master->free_rr_slots) - 1;
	activedevs = readl(master->regs + DEVS_CTRL) &
		     DEVS_CTRL_DEVS_ACTIVE_MASK;
	/* Slot 0 belongs to this master (programmed in bus_init). */
	for (i = 1; i <= master->maxdevs; i++) {
		if (!(BIT(i) & activedevs))
		rr = readl(master->regs + DEV_ID_RR0(i));
		if (!(rr & DEV_ID_RR0_IS_I3C) ||
		    DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
/*
 * ->reattach_i3c_dev() hook: the device's address changed, refresh its
 * retaining register to match.
 */
static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
	cdns_i3c_master_upd_i3c_addr(dev);
/*
 * ->attach_i3c_dev() hook: allocate per-device data, reserve an RR slot,
 * and for devices that do not yet have a dynamic address, program their
 * static address into the slot and mark it active so SETDASA/DAA can
 * address them.
 */
static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
	i3c_dev_set_master_data(dev, data);
	/* Mark the slot as taken. */
	master->free_rr_slots &= ~BIT(slot);
	if (!dev->info.dyn_addr) {
		cdns_i3c_master_upd_i3c_addr(dev);
		writel(readl(master->regs + DEVS_CTRL) |
		       DEVS_CTRL_DEV_ACTIVE(data->id),
		       master->regs + DEVS_CTRL);
/*
 * ->detach_i3c_dev() hook: clear the device's RR slot in hardware and
 * return it to the free pool.
 */
static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);
	i3c_dev_set_master_data(dev, NULL);
	master->free_rr_slots |= BIT(data->id);
/*
 * ->attach_i2c_dev() hook: reserve a free RR slot (no dynamic address for
 * I2C devices), program the device's static address and LVR, and mark the
 * slot active.
 */
static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	slot = cdns_i3c_master_get_rr_slot(master, 0);
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	master->free_rr_slots &= ~BIT(slot);
	i2c_dev_set_master_data(dev, data);
	writel(prepare_rr0_dev_address(dev->boardinfo->base.addr),
	       master->regs + DEV_ID_RR0(data->id));
	writel(dev->boardinfo->lvr, master->regs + DEV_ID_RR2(data->id));
	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_ACTIVE(data->id),
	       master->regs + DEVS_CTRL);
/*
 * ->detach_i2c_dev() hook: clear the device's RR slot in hardware and
 * return it to the free pool.
 */
static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);
	master->free_rr_slots |= BIT(data->id);
	i2c_dev_set_master_data(dev, NULL);
/* ->bus_cleanup() hook: quiesce the controller when the bus goes away. */
static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *cdns = to_cdns_i3c_master(m);

	cdns_i3c_master_disable(cdns);
}
/*
 * Rebuild an i3c_device_info from a slot's retaining registers:
 * dynamic address from RR0, BCR and PID low bits from RR2, PID high bits
 * from RR1.
 */
static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
					   struct i3c_device_info *info)
	memset(info, 0, sizeof(*info));
	rr = readl(master->regs + DEV_ID_RR0(slot));
	info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
	rr = readl(master->regs + DEV_ID_RR2(slot));
	info->bcr = rr >> 8;
	/* PID spans RR2[31:16] (low 16 bits) and all of RR1 (high bits). */
	info->pid = rr >> 16;
	info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
/*
 * Walk every I3C device on the bus, derive the most restrictive maximum
 * SCL frequency from its GETMXDS read/write data-speed fields, and widen
 * the push-pull low period (PRESCL_CTRL1.PP_LOW) to honour it.  The
 * controller is briefly disabled while the prescaler is rewritten.
 */
static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
	struct i3c_master_controller *m = &master->base;
	unsigned long i3c_lim_period, pres_step, ncycles;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	unsigned long new_i3c_scl_lim = 0;
	struct i3c_dev_desc *dev;
	i3c_bus_for_each_i3cdev(bus, dev) {
		unsigned long max_fscl;
		/* The stricter of the device's read and write speed limits. */
		max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
			       I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
		case I3C_SDR1_FSCL_8MHZ:
		case I3C_SDR2_FSCL_6MHZ:
		case I3C_SDR3_FSCL_4MHZ:
		case I3C_SDR4_FSCL_2MHZ:
		case I3C_SDR0_FSCL_MAX:
		/* Keep the lowest (most restrictive) non-zero limit. */
		    (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
			new_i3c_scl_lim = max_fscl;
	/* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
	if (new_i3c_scl_lim == master->i3c_scl_lim)
	master->i3c_scl_lim = new_i3c_scl_lim;
	if (!new_i3c_scl_lim)
	pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);
	/* Configure PP_LOW to meet I3C slave limitations. */
	prescl1 = readl(master->regs + PRESCL_CTRL1) &
		  ~PRESCL_CTRL1_PP_LOW_MASK;
	ctrl = readl(master->regs + CTRL);
	i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
	ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
	prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);
	/* Disable I3C master before updating PRESCL_CTRL1. */
	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_disable(master);
	writel(prescl1, master->regs + PRESCL_CTRL1);
	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_enable(master);
/*
 * ->do_daa() hook: pre-program every free RR slot with a candidate
 * dynamic address, run ENTDAA, register the devices that appeared, then
 * release the unused slots and refresh bus-wide state (DEFSLVS, SCL
 * limit, HJ/MR event enabling).
 */
static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	u32 olddevs, newdevs;
	u8 addrs[MAX_DEVS] = { };
	olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	/* Prepare RR slots before launching DAA. */
	for (slot = 1; slot <= master->maxdevs; slot++) {
		if (olddevs & BIT(slot))
		ret = i3c_master_get_free_addr(m, last_addr + 1);
		addrs[slot] = last_addr;
		writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
		       master->regs + DEV_ID_RR0(slot));
		writel(0, master->regs + DEV_ID_RR1(slot));
		writel(0, master->regs + DEV_ID_RR2(slot));
	ret = i3c_master_entdaa_locked(&master->base);
	/* M2 just means no more devices answered — not a failure. */
	if (ret && ret != I3C_ERROR_M2)
	newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	newdevs &= ~olddevs;
	/*
	 * Clear all retaining registers filled during DAA. We already
	 * have the addressed assigned to them in the addrs array.
	 */
	for (slot = 1; slot <= master->maxdevs; slot++) {
		if (newdevs & BIT(slot))
			i3c_master_add_i3c_dev_locked(m, addrs[slot]);
	/*
	 * Clear slots that ended up not being used. Can be caused by I3C
	 * device creation failure or when the I3C device was already known
	 * by the system but with a different address (in this case the device
	 * already has a slot and does not need a new one).
	 */
	writel(readl(master->regs + DEVS_CTRL) |
	       master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
	       master->regs + DEVS_CTRL);
	i3c_master_defslvs_locked(&master->base);
	cdns_i3c_master_upd_i3c_scl_lim(master);
	/* Unmask Hot-Join and Mastership request interrupts. */
	i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
			       I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);
/*
 * ->bus_init() hook: select the bus mode, derive the I3C and I2C SCL
 * prescalers from the system clock, program the master's own address into
 * RR slot 0, publish the controller's device info, then enable the
 * controller with Hot-Join acking configured.
 */
static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned long pres_step, sysclk_rate, max_i2cfreq;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	u32 ctrl, prescl0, prescl1, pres, low;
	struct i3c_device_info info = { };
	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		ctrl = CTRL_PURE_BUS_MODE;
	case I3C_BUS_MODE_MIXED_FAST:
		ctrl = CTRL_MIXED_FAST_BUS_MODE;
	case I3C_BUS_MODE_MIXED_SLOW:
		ctrl = CTRL_MIXED_SLOW_BUS_MODE;
	sysclk_rate = clk_get_rate(master->sysclk);
	/* I3C SCL prescaler: SCL = sysclk / ((pres + 1) * 4). */
	pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
	if (pres > PRESCL_CTRL0_MAX)
	/* Publish the actually-achievable rate back to the bus. */
	bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
	prescl0 = PRESCL_CTRL0_I3C(pres);
	low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2;
	prescl1 = PRESCL_CTRL1_OD_LOW(low);
	max_i2cfreq = bus->scl_rate.i2c;
	/* I2C SCL prescaler: SCL = sysclk / ((pres + 1) * 5). */
	pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
	if (pres > PRESCL_CTRL0_MAX)
	bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
	prescl0 |= PRESCL_CTRL0_I2C(pres);
	writel(prescl0, master->regs + PRESCL_CTRL0);
	/* Calculate OD and PP low. */
	pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
	ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
	prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
	writel(prescl1, master->regs + PRESCL_CTRL1);
	/* Get an address for the master. */
	ret = i3c_master_get_free_addr(m, 0);
	/* RR slot 0 holds this master's own address. */
	writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
	       master->regs + DEV_ID_RR0(0));
	cdns_i3c_master_dev_rr_to_info(master, 0, &info);
	if (info.bcr & I3C_BCR_HDR_CAP)
		info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
	ret = i3c_master_set_info(&master->base, &info);
	/*
	 * Enable Hot-Join, and, when a Hot-Join request happens, disable all
	 * events coming from this device.
	 *
	 * We will issue ENTDAA afterwards from the threaded IRQ handler.
	 */
	ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
	writel(ctrl, master->regs + CTRL);
	cdns_i3c_master_enable(master);
/*
 * Process one IBI descriptor (ibir): identify the device that emitted
 * the IBI, copy the payload out of the IBI data FIFO into a free slot
 * from the device's generic IBI pool and queue it to the I3C core.
 * The FIFO is drained in every case (even when the data cannot be
 * delivered) so the controller never ends up with stale payload words.
 */
static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
	struct cdns_i3c_i2c_dev_data *data;
	bool data_consumed = false;
	struct i3c_ibi_slot *slot;
	u32 id = IBIR_SLVID(ibir);	/* hardware IBI slot index from the descriptor */
	struct i3c_dev_desc *dev;

	/*
	 * FIXME: maybe we should report the FIFO OVF errors to the upper
	/* Unknown slot or errored descriptor: fall through to the FIFO drain. */
	if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))

	dev = master->ibi.slots[id];
	spin_lock(&master->ibi.lock);

	data = i3c_dev_get_master_data(dev);
	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);

	/* Copy the word-aligned part of the payload first... */
	nbytes = IBIR_XFER_BYTES(ibir);
	readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
	/* ...then the trailing 1-3 bytes from one extra FIFO word. */
	u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);

	memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);

	/* Never report more than the consumer's advertised max payload. */
	slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
			  dev->ibi->max_payload_len);
	i3c_master_queue_ibi(dev, slot);
	data_consumed = true;

	spin_unlock(&master->ibi.lock);

	/* Consume data from the FIFO if it's not been done already. */
	if (!data_consumed) {
		for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
			readl(master->regs + IBI_DATA_FIFO);
/*
 * Drain the IBI response FIFO and dispatch each descriptor by type:
 * in-band interrupts go to cdns_i3c_master_handle_ibi(), Hot-Join
 * requests schedule the hj_work handler, other types are only
 * sanity-checked.
 *
 * NOTE(review): "cnds" in the name is a historical typo for "cdns";
 * renaming it would also touch the interrupt handler, so it is kept.
 */
static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
	/* Ack the IBI-response-threshold interrupt before draining. */
	writel(MST_INT_IBIR_THR, master->regs + MST_ICR);

	/* Keep popping descriptors until the hardware reports FIFO empty. */
	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_IBIR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		u32 ibir = readl(master->regs + IBIR);

		switch (IBIR_TYPE(ibir)) {
			cdns_i3c_master_handle_ibi(master, ibir);
			/* Hot-Join: no payload or error flag is expected. */
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			queue_work(master->base.wq, &master->hj_work);
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
/*
 * Top-half interrupt handler: ignore status bits that are not currently
 * unmasked, complete the in-flight transfer under the xfer queue lock,
 * then demux pending IBIs when the IBI-response-threshold bit is set.
 */
static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
	struct cdns_i3c_master *master = data;

	status = readl(master->regs + MST_ISR);
	/* Not for us if none of the raised bits are enabled in MST_IMR. */
	if (!(status & readl(master->regs + MST_IMR)))

	spin_lock(&master->xferqueue.lock);
	cdns_i3c_master_end_xfer_locked(master, status);
	spin_unlock(&master->xferqueue.lock);

	if (status & MST_INT_IBIR_THR)
		cnds_i3c_master_demux_ibis(master);
/*
 * Disable IBIs for @dev: send DISEC (disable events) to the device,
 * then reset its SIR map entry to the broadcast address so a late IBI
 * from it is no longer routed to a private slot.
 */
static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr,

	/* Roll the SIR map entry back to its broadcast-address default. */
	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sirmap |= SIR_MAP_DEV_CONF(data->ibi,
				   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);
/*
 * Enable IBIs for @dev: program its SIR map entry (role, dynamic
 * address, max payload length, optional speed limitation), then send
 * ENEC so the device may emit IBIs.  If ENEC fails, the SIR map entry
 * is rolled back to the broadcast-address default.
 */
static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	/* BCR[7:6] encodes the device role. */
	sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
		 SIR_MAP_DEV_DA(dev->info.dyn_addr) |
		 SIR_MAP_DEV_PL(dev->info.max_ibi_len) |

	if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
		sircfg |= SIR_MAP_DEV_SLOW;

	sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	ret = i3c_master_enec_locked(m, dev->info.dyn_addr,

	/* ENEC failed: restore the broadcast-address mapping. */
	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sirmap |= SIR_MAP_DEV_CONF(data->ibi,
				   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);
/*
 * Reserve IBI resources for @dev: allocate the per-device generic IBI
 * slot pool, then bind the device to the first free hardware IBI slot.
 * When no hardware slot is available the pool allocation is undone.
 */
static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				       const struct i3c_ibi_setup *req)
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	/* Claim the first free hardware IBI slot, if any. */
	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			master->ibi.slots[i] = dev;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	/* A slot was claimed: success. */
	if (i < master->ibi.num_slots)

	/* No free slot: release the pool we just allocated. */
	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;
/*
 * Release the hardware IBI slot claimed by cdns_i3c_master_request_ibi()
 * and free the device's generic IBI pool.
 */
static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	/* Unbind the device from its hardware slot under the IBI lock. */
	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
1493 static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1494 struct i3c_ibi_slot *slot)
1496 struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1498 i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
/*
 * Controller hooks registered with the I3C core; see
 * struct i3c_master_controller_ops for the contract of each callback.
 */
static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
	.bus_init = cdns_i3c_master_bus_init,
	.bus_cleanup = cdns_i3c_master_bus_cleanup,
	.do_daa = cdns_i3c_master_do_daa,
	.attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
	.reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
	.detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
	.attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
	.supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
	.priv_xfers = cdns_i3c_master_priv_xfers,
	.i2c_xfers = cdns_i3c_master_i2c_xfers,
	.enable_ibi = cdns_i3c_master_enable_ibi,
	.disable_ibi = cdns_i3c_master_disable_ibi,
	.request_ibi = cdns_i3c_master_request_ibi,
	.free_ibi = cdns_i3c_master_free_ibi,
	.recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
1521 static void cdns_i3c_master_hj(struct work_struct *work)
1523 struct cdns_i3c_master *master = container_of(work,
1524 struct cdns_i3c_master,
1527 i3c_master_do_daa(&master->base);
1530 static int cdns_i3c_master_probe(struct platform_device *pdev)
1532 struct cdns_i3c_master *master;
1533 struct resource *res;
1537 master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
1541 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1542 master->regs = devm_ioremap_resource(&pdev->dev, res);
1543 if (IS_ERR(master->regs))
1544 return PTR_ERR(master->regs);
1546 master->pclk = devm_clk_get(&pdev->dev, "pclk");
1547 if (IS_ERR(master->pclk))
1548 return PTR_ERR(master->pclk);
1550 master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
1551 if (IS_ERR(master->sysclk))
1552 return PTR_ERR(master->sysclk);
1554 irq = platform_get_irq(pdev, 0);
1558 ret = clk_prepare_enable(master->pclk);
1562 ret = clk_prepare_enable(master->sysclk);
1564 goto err_disable_pclk;
1566 if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
1568 goto err_disable_sysclk;
1571 spin_lock_init(&master->xferqueue.lock);
1572 INIT_LIST_HEAD(&master->xferqueue.list);
1574 INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
1575 writel(0xffffffff, master->regs + MST_IDR);
1576 writel(0xffffffff, master->regs + SLV_IDR);
1577 ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
1578 dev_name(&pdev->dev), master);
1580 goto err_disable_sysclk;
1582 platform_set_drvdata(pdev, master);
1584 val = readl(master->regs + CONF_STATUS0);
1586 /* Device ID0 is reserved to describe this master. */
1587 master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
1588 master->free_rr_slots = GENMASK(master->maxdevs, 1);
1590 val = readl(master->regs + CONF_STATUS1);
1591 master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
1592 master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
1593 master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
1594 master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
1595 master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
1597 spin_lock_init(&master->ibi.lock);
1598 master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
1599 master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1600 sizeof(*master->ibi.slots),
1602 if (!master->ibi.slots)
1603 goto err_disable_sysclk;
1605 writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
1606 writel(MST_INT_IBIR_THR, master->regs + MST_IER);
1607 writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
1609 ret = i3c_master_register(&master->base, &pdev->dev,
1610 &cdns_i3c_master_ops, false);
1612 goto err_disable_sysclk;
1617 clk_disable_unprepare(master->sysclk);
1620 clk_disable_unprepare(master->pclk);
1625 static int cdns_i3c_master_remove(struct platform_device *pdev)
1627 struct cdns_i3c_master *master = platform_get_drvdata(pdev);
1630 ret = i3c_master_unregister(&master->base);
1634 clk_disable_unprepare(master->sysclk);
1635 clk_disable_unprepare(master->pclk);
/* Devicetree match table: binds this driver to "cdns,i3c-master" nodes. */
static const struct of_device_id cdns_i3c_master_of_ids[] = {
	{ .compatible = "cdns,i3c-master" },
/* Platform driver glue: probe/remove callbacks plus OF matching. */
static struct platform_driver cdns_i3c_master = {
	.probe = cdns_i3c_master_probe,
	.remove = cdns_i3c_master_remove,
		.name = "cdns-i3c-master",
		.of_match_table = cdns_i3c_master_of_ids,
/* Registers the driver and generates module init/exit boilerplate. */
module_platform_driver(cdns_i3c_master);

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
MODULE_DESCRIPTION("Cadence I3C master driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cdns-i3c-master");