// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Cadence Design Systems Inc.
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define DEV_ID				0x0
#define DEV_ID_I3C_MASTER		0x5034

#define CONF_STATUS0			0x4
#define CONF_STATUS0_CMDR_DEPTH(x)	(4 << (((x) & GENMASK(31, 29)) >> 29))
#define CONF_STATUS0_ECC_CHK		BIT(28)
#define CONF_STATUS0_INTEG_CHK		BIT(27)
#define CONF_STATUS0_CSR_DAP_CHK	BIT(26)
#define CONF_STATUS0_TRANS_TOUT_CHK	BIT(25)
#define CONF_STATUS0_PROT_FAULTS_CHK	BIT(24)
#define CONF_STATUS0_GPO_NUM(x)		(((x) & GENMASK(23, 16)) >> 16)
#define CONF_STATUS0_GPI_NUM(x)		(((x) & GENMASK(15, 8)) >> 8)
#define CONF_STATUS0_IBIR_DEPTH(x)	(4 << (((x) & GENMASK(7, 6)) >> 6))
#define CONF_STATUS0_SUPPORTS_DDR	BIT(5)
#define CONF_STATUS0_SEC_MASTER		BIT(4)
#define CONF_STATUS0_DEVS_NUM(x)	((x) & GENMASK(3, 0))

#define CONF_STATUS1			0x8
#define CONF_STATUS1_IBI_HW_RES(x)	((((x) & GENMASK(31, 28)) >> 28) + 1)
#define CONF_STATUS1_CMD_DEPTH(x)	(4 << (((x) & GENMASK(27, 26)) >> 26))
#define CONF_STATUS1_SLVDDR_RX_DEPTH(x)	(8 << (((x) & GENMASK(25, 21)) >> 21))
#define CONF_STATUS1_SLVDDR_TX_DEPTH(x)	(8 << (((x) & GENMASK(20, 16)) >> 16))
#define CONF_STATUS1_IBI_DEPTH(x)	(2 << (((x) & GENMASK(12, 10)) >> 10))
#define CONF_STATUS1_RX_DEPTH(x)	(8 << (((x) & GENMASK(9, 5)) >> 5))
#define CONF_STATUS1_TX_DEPTH(x)	(8 << ((x) & GENMASK(4, 0)))

#define REV_ID				0xc
#define REV_ID_VID(id)			(((id) & GENMASK(31, 20)) >> 20)
#define REV_ID_PID(id)			(((id) & GENMASK(19, 8)) >> 8)
#define REV_ID_REV_MAJOR(id)		(((id) & GENMASK(7, 4)) >> 4)
#define REV_ID_REV_MINOR(id)		((id) & GENMASK(3, 0))

#define CTRL				0x10
#define CTRL_DEV_EN			BIT(31)
#define CTRL_HALT_EN			BIT(30)
#define CTRL_MCS			BIT(29)
#define CTRL_MCS_EN			BIT(28)
#define CTRL_THD_DELAY(x)		(((x) << 24) & GENMASK(25, 24))
#define CTRL_HJ_DISEC			BIT(8)
#define CTRL_MST_ACK			BIT(7)
#define CTRL_HJ_ACK			BIT(6)
#define CTRL_HJ_INIT			BIT(5)
#define CTRL_MST_INIT			BIT(4)
#define CTRL_AHDR_OPT			BIT(3)
#define CTRL_PURE_BUS_MODE		0
#define CTRL_MIXED_FAST_BUS_MODE	2
#define CTRL_MIXED_SLOW_BUS_MODE	3
#define CTRL_BUS_MODE_MASK		GENMASK(1, 0)
#define THD_DELAY_MAX			3

#define PRESCL_CTRL0			0x14
#define PRESCL_CTRL0_I2C(x)		((x) << 16)
#define PRESCL_CTRL0_I3C(x)		(x)
#define PRESCL_CTRL0_I3C_MAX		GENMASK(9, 0)
#define PRESCL_CTRL0_I2C_MAX		GENMASK(15, 0)

#define PRESCL_CTRL1			0x18
#define PRESCL_CTRL1_PP_LOW_MASK	GENMASK(15, 8)
#define PRESCL_CTRL1_PP_LOW(x)		((x) << 8)
#define PRESCL_CTRL1_OD_LOW_MASK	GENMASK(7, 0)
#define PRESCL_CTRL1_OD_LOW(x)		(x)

#define MST_IER				0x20
#define MST_IDR				0x24
#define MST_IMR				0x28
#define MST_ICR				0x2c
#define MST_ISR				0x30
#define MST_INT_HALTED			BIT(18)
#define MST_INT_MR_DONE			BIT(17)
#define MST_INT_IMM_COMP		BIT(16)
#define MST_INT_TX_THR			BIT(15)
#define MST_INT_TX_OVF			BIT(14)
#define MST_INT_IBID_THR		BIT(12)
#define MST_INT_IBID_UNF		BIT(11)
#define MST_INT_IBIR_THR		BIT(10)
#define MST_INT_IBIR_UNF		BIT(9)
#define MST_INT_IBIR_OVF		BIT(8)
#define MST_INT_RX_THR			BIT(7)
#define MST_INT_RX_UNF			BIT(6)
#define MST_INT_CMDD_EMP		BIT(5)
#define MST_INT_CMDD_THR		BIT(4)
#define MST_INT_CMDD_OVF		BIT(3)
#define MST_INT_CMDR_THR		BIT(2)
#define MST_INT_CMDR_UNF		BIT(1)
#define MST_INT_CMDR_OVF		BIT(0)

#define MST_STATUS0			0x34
#define MST_STATUS0_IDLE		BIT(18)
#define MST_STATUS0_HALTED		BIT(17)
#define MST_STATUS0_MASTER_MODE		BIT(16)
#define MST_STATUS0_TX_FULL		BIT(13)
#define MST_STATUS0_IBID_FULL		BIT(12)
#define MST_STATUS0_IBIR_FULL		BIT(11)
#define MST_STATUS0_RX_FULL		BIT(10)
#define MST_STATUS0_CMDD_FULL		BIT(9)
#define MST_STATUS0_CMDR_FULL		BIT(8)
#define MST_STATUS0_TX_EMP		BIT(5)
#define MST_STATUS0_IBID_EMP		BIT(4)
#define MST_STATUS0_IBIR_EMP		BIT(3)
#define MST_STATUS0_RX_EMP		BIT(2)
#define MST_STATUS0_CMDD_EMP		BIT(1)
#define MST_STATUS0_CMDR_EMP		BIT(0)

#define CMDR				0x38
#define CMDR_NO_ERROR			0
#define CMDR_DDR_PREAMBLE_ERROR		1
#define CMDR_DDR_PARITY_ERROR		2
#define CMDR_DDR_RX_FIFO_OVF		3
#define CMDR_DDR_TX_FIFO_UNF		4
#define CMDR_M0_ERROR			5
#define CMDR_M1_ERROR			6
#define CMDR_M2_ERROR			7
#define CMDR_MST_ABORT			8
#define CMDR_NACK_RESP			9
#define CMDR_INVALID_DA			10
#define CMDR_DDR_DROPPED		11
#define CMDR_ERROR(x)			(((x) & GENMASK(27, 24)) >> 24)
#define CMDR_XFER_BYTES(x)		(((x) & GENMASK(19, 8)) >> 8)
#define CMDR_CMDID_HJACK_DISEC		0xfe
#define CMDR_CMDID_HJACK_ENTDAA		0xff
#define CMDR_CMDID(x)			((x) & GENMASK(7, 0))

#define IBIR				0x3c
#define IBIR_ACKED			BIT(12)
#define IBIR_SLVID(x)			(((x) & GENMASK(11, 8)) >> 8)
#define IBIR_ERROR			BIT(7)
#define IBIR_XFER_BYTES(x)		(((x) & GENMASK(6, 2)) >> 2)
#define IBIR_TYPE_IBI			0
#define IBIR_TYPE_HJ			1
#define IBIR_TYPE_MR			2
#define IBIR_TYPE(x)			((x) & GENMASK(1, 0))

#define SLV_IER				0x40
#define SLV_IDR				0x44
#define SLV_IMR				0x48
#define SLV_ICR				0x4c
#define SLV_ISR				0x50
#define SLV_INT_TM			BIT(20)
#define SLV_INT_ERROR			BIT(19)
#define SLV_INT_EVENT_UP		BIT(18)
#define SLV_INT_HJ_DONE			BIT(17)
#define SLV_INT_MR_DONE			BIT(16)
#define SLV_INT_DA_UPD			BIT(15)
#define SLV_INT_SDR_FAIL		BIT(14)
#define SLV_INT_DDR_FAIL		BIT(13)
#define SLV_INT_M_RD_ABORT		BIT(12)
#define SLV_INT_DDR_RX_THR		BIT(11)
#define SLV_INT_DDR_TX_THR		BIT(10)
#define SLV_INT_SDR_RX_THR		BIT(9)
#define SLV_INT_SDR_TX_THR		BIT(8)
#define SLV_INT_DDR_RX_UNF		BIT(7)
#define SLV_INT_DDR_TX_OVF		BIT(6)
#define SLV_INT_SDR_RX_UNF		BIT(5)
#define SLV_INT_SDR_TX_OVF		BIT(4)
#define SLV_INT_DDR_RD_COMP		BIT(3)
#define SLV_INT_DDR_WR_COMP		BIT(2)
#define SLV_INT_SDR_RD_COMP		BIT(1)
#define SLV_INT_SDR_WR_COMP		BIT(0)

#define SLV_STATUS0			0x54
#define SLV_STATUS0_REG_ADDR(s)		(((s) & GENMASK(23, 16)) >> 16)
#define SLV_STATUS0_XFRD_BYTES(s)	((s) & GENMASK(15, 0))

#define SLV_STATUS1			0x58
#define SLV_STATUS1_AS(s)		(((s) & GENMASK(21, 20)) >> 20)
#define SLV_STATUS1_VEN_TM		BIT(19)
#define SLV_STATUS1_HJ_DIS		BIT(18)
#define SLV_STATUS1_MR_DIS		BIT(17)
#define SLV_STATUS1_PROT_ERR		BIT(16)
#define SLV_STATUS1_DA(s)		(((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA		BIT(8)
#define SLV_STATUS1_DDR_RX_FULL		BIT(7)
#define SLV_STATUS1_DDR_TX_FULL		BIT(6)
#define SLV_STATUS1_DDR_RX_EMPTY	BIT(5)
#define SLV_STATUS1_DDR_TX_EMPTY	BIT(4)
#define SLV_STATUS1_SDR_RX_FULL		BIT(3)
#define SLV_STATUS1_SDR_TX_FULL		BIT(2)
#define SLV_STATUS1_SDR_RX_EMPTY	BIT(1)
#define SLV_STATUS1_SDR_TX_EMPTY	BIT(0)

#define CMD0_FIFO			0x60
#define CMD0_FIFO_IS_DDR		BIT(31)
#define CMD0_FIFO_IS_CCC		BIT(30)
#define CMD0_FIFO_BCH			BIT(29)
#define XMIT_BURST_STATIC_SUBADDR	0
#define XMIT_SINGLE_INC_SUBADDR		1
#define XMIT_SINGLE_STATIC_SUBADDR	2
#define XMIT_BURST_WITHOUT_SUBADDR	3
#define CMD0_FIFO_PRIV_XMIT_MODE(m)	((m) << 27)
#define CMD0_FIFO_SBCA			BIT(26)
#define CMD0_FIFO_RSBC			BIT(25)
#define CMD0_FIFO_IS_10B		BIT(24)
#define CMD0_FIFO_PL_LEN(l)		((l) << 12)
#define CMD0_FIFO_PL_LEN_MAX		4095
#define CMD0_FIFO_DEV_ADDR(a)		((a) << 1)
#define CMD0_FIFO_RNW			BIT(0)

#define CMD1_FIFO			0x64
#define CMD1_FIFO_CMDID(id)		((id) << 24)
#define CMD1_FIFO_CSRADDR(a)		(a)
#define CMD1_FIFO_CCC(id)		(id)

#define TX_FIFO				0x68

#define IMD_CMD0			0x70
#define IMD_CMD0_PL_LEN(l)		((l) << 12)
#define IMD_CMD0_DEV_ADDR(a)		((a) << 1)
#define IMD_CMD0_RNW			BIT(0)

#define IMD_CMD1			0x74
#define IMD_CMD1_CCC(id)		(id)

#define IMD_DATA			0x78
#define RX_FIFO				0x80
#define IBI_DATA_FIFO			0x84
#define SLV_DDR_TX_FIFO			0x88
#define SLV_DDR_RX_FIFO			0x8c

#define CMD_IBI_THR_CTRL		0x90
#define IBIR_THR(t)			((t) << 24)
#define CMDR_THR(t)			((t) << 16)
#define IBI_THR(t)			((t) << 8)
#define CMD_THR(t)			(t)

#define TX_RX_THR_CTRL			0x94
#define RX_THR(t)			((t) << 16)
#define TX_THR(t)			(t)

#define SLV_DDR_TX_RX_THR_CTRL		0x98
#define SLV_DDR_RX_THR(t)		((t) << 16)
#define SLV_DDR_TX_THR(t)		(t)

#define FLUSH_CTRL			0x9c
#define FLUSH_IBI_RESP			BIT(23)
#define FLUSH_CMD_RESP			BIT(22)
#define FLUSH_SLV_DDR_RX_FIFO		BIT(22)
#define FLUSH_SLV_DDR_TX_FIFO		BIT(21)
#define FLUSH_IMM_FIFO			BIT(20)
#define FLUSH_IBI_FIFO			BIT(19)
#define FLUSH_RX_FIFO			BIT(18)
#define FLUSH_TX_FIFO			BIT(17)
#define FLUSH_CMD_FIFO			BIT(16)

#define TTO_PRESCL_CTRL0		0xb0
#define TTO_PRESCL_CTRL0_DIVB(x)	((x) << 16)
#define TTO_PRESCL_CTRL0_DIVA(x)	(x)

#define TTO_PRESCL_CTRL1		0xb4
#define TTO_PRESCL_CTRL1_DIVB(x)	((x) << 16)
#define TTO_PRESCL_CTRL1_DIVA(x)	(x)

#define DEVS_CTRL			0xb8
#define DEVS_CTRL_DEV_CLR_SHIFT		16
#define DEVS_CTRL_DEV_CLR_ALL		GENMASK(31, 16)
#define DEVS_CTRL_DEV_CLR(dev)		BIT(16 + (dev))
#define DEVS_CTRL_DEV_ACTIVE(dev)	BIT(dev)
#define DEVS_CTRL_DEVS_ACTIVE_MASK	GENMASK(15, 0)
#define MAX_DEVS			16

#define DEV_ID_RR0(d)			(0xc0 + ((d) * 0x10))
#define DEV_ID_RR0_LVR_EXT_ADDR		BIT(11)
#define DEV_ID_RR0_HDR_CAP		BIT(10)
#define DEV_ID_RR0_IS_I3C		BIT(9)
#define DEV_ID_RR0_DEV_ADDR_MASK	(GENMASK(6, 0) | GENMASK(15, 13))
#define DEV_ID_RR0_SET_DEV_ADDR(a)	(((a) & GENMASK(6, 0)) | \
					 (((a) & GENMASK(9, 7)) << 6))
#define DEV_ID_RR0_GET_DEV_ADDR(x)	((((x) >> 1) & GENMASK(6, 0)) | \
					 (((x) >> 6) & GENMASK(9, 7)))

#define DEV_ID_RR1(d)			(0xc4 + ((d) * 0x10))
#define DEV_ID_RR1_PID_MSB(pid)		(pid)

#define DEV_ID_RR2(d)			(0xc8 + ((d) * 0x10))
#define DEV_ID_RR2_PID_LSB(pid)		((pid) << 16)
#define DEV_ID_RR2_BCR(bcr)		((bcr) << 8)
#define DEV_ID_RR2_DCR(dcr)		(dcr)
#define DEV_ID_RR2_LVR(lvr)		(lvr)

#define SIR_MAP(x)			(0x180 + ((x) * 4))
#define SIR_MAP_DEV_REG(d)		SIR_MAP((d) / 2)
#define SIR_MAP_DEV_SHIFT(d, fs)	((fs) + (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF_MASK(d)	(GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF(d, c)		((c) << (((d) % 2) ? 16 : 0))
#define DEV_ROLE_SLAVE			0
#define DEV_ROLE_MASTER			1
#define SIR_MAP_DEV_ROLE(role)		((role) << 14)
#define SIR_MAP_DEV_SLOW		BIT(13)
#define SIR_MAP_DEV_PL(l)		((l) << 8)
#define SIR_MAP_PL_MAX			GENMASK(4, 0)
#define SIR_MAP_DEV_DA(a)		((a) << 1)
#define SIR_MAP_DEV_ACK			BIT(0)

#define GPIR_WORD(x)			(0x200 + ((x) * 4))
#define GPI_REG(val, id)		\
	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

#define GPOR_WORD(x)			(0x220 + ((x) * 4))
#define GPO_REG(val, id)		\
	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

#define ASF_INT_STATUS			0x300
#define ASF_INT_RAW_STATUS		0x304
#define ASF_INT_MASK			0x308
#define ASF_INT_TEST			0x30c
#define ASF_INT_FATAL_SELECT		0x310
#define ASF_INTEGRITY_ERR		BIT(6)
#define ASF_PROTOCOL_ERR		BIT(5)
#define ASF_TRANS_TIMEOUT_ERR		BIT(4)
#define ASF_CSR_ERR			BIT(3)
#define ASF_DAP_ERR			BIT(2)
#define ASF_SRAM_UNCORR_ERR		BIT(1)
#define ASF_SRAM_CORR_ERR		BIT(0)

#define ASF_SRAM_CORR_FAULT_STATUS	0x320
#define ASF_SRAM_UNCORR_FAULT_STATUS	0x324
#define ASF_SRAM_CORR_FAULT_INSTANCE(x)	((x) >> 24)
#define ASF_SRAM_CORR_FAULT_ADDR(x)	((x) & GENMASK(23, 0))

#define ASF_SRAM_FAULT_STATS		0x328
#define ASF_SRAM_FAULT_UNCORR_STATS(x)	((x) >> 16)
#define ASF_SRAM_FAULT_CORR_STATS(x)	((x) & GENMASK(15, 0))

#define ASF_TRANS_TOUT_CTRL		0x330
#define ASF_TRANS_TOUT_EN		BIT(31)
#define ASF_TRANS_TOUT_VAL(x)		(x)

#define ASF_TRANS_TOUT_FAULT_MASK	0x334
#define ASF_TRANS_TOUT_FAULT_STATUS	0x338
#define ASF_TRANS_TOUT_FAULT_APB	BIT(3)
#define ASF_TRANS_TOUT_FAULT_SCL_LOW	BIT(2)
#define ASF_TRANS_TOUT_FAULT_SCL_HIGH	BIT(1)
#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH	BIT(0)

#define ASF_PROTO_FAULT_MASK		0x340
#define ASF_PROTO_FAULT_STATUS		0x344
#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT	BIT(31)
#define ASF_PROTO_FAULT_SLVDDR_FAIL	BIT(30)
#define ASF_PROTO_FAULT_S(x)		BIT(16 + (x))
#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT	BIT(15)
#define ASF_PROTO_FAULT_MSTDDR_FAIL	BIT(14)
#define ASF_PROTO_FAULT_M(x)		BIT(x)

struct cdns_i3c_master_caps {
	u32 cmdfifodepth;
	u32 cmdrfifodepth;
	u32 txfifodepth;
	u32 rxfifodepth;
	u32 ibirfifodepth;
};

struct cdns_i3c_cmd {
	u32 cmd0;
	u32 cmd1;
	u32 tx_len;
	const void *tx_buf;
	u32 rx_len;
	void *rx_buf;
	u32 error;
};

struct cdns_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int ncmds;
	struct cdns_i3c_cmd cmds[] __counted_by(ncmds);
};

struct cdns_i3c_data {
	u8 thd_delay_ns;
};

struct cdns_i3c_master {
	struct work_struct hj_work;
	struct i3c_master_controller base;
	u32 free_rr_slots;
	unsigned int maxdevs;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		spinlock_t lock;
	} ibi;
	struct {
		struct list_head list;
		struct cdns_i3c_xfer *cur;
		spinlock_t lock;
	} xferqueue;
	void __iomem *regs;
	struct clk *sysclk;
	struct clk *pclk;
	struct cdns_i3c_master_caps caps;
	unsigned long i3c_scl_lim;
	const struct cdns_i3c_data *devdata;
};

static inline struct cdns_i3c_master *
to_cdns_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct cdns_i3c_master, base);
}

static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
					   const u8 *bytes, int nbytes)
{
	writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = 0;

		memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
		writesl(master->regs + TX_FIFO, &tmp, 1);
	}
}

static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
					    u8 *bytes, int nbytes)
{
	readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp;

		readsl(master->regs + RX_FIFO, &tmp, 1);
		memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
	}
}

static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					     const struct i3c_ccc_cmd *cmd)
{
	if (cmd->ndests > 1)
		return false;

	switch (cmd->id) {
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_ENTDAA:
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_DEFSLVS:
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	case I3C_CCC_GETMWL:
	case I3C_CCC_GETMRL:
	case I3C_CCC_GETPID:
	case I3C_CCC_GETBCR:
	case I3C_CCC_GETDCR:
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETACCMST:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
		return true;
	default:
		break;
	}

	return false;
}

static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
{
	u32 status;

	writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);

	return readl_poll_timeout(master->regs + MST_STATUS0, status,
				  status & MST_STATUS0_IDLE, 10, 1000000);
}

static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
{
	writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
}

static struct cdns_i3c_xfer *
cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
{
	struct cdns_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
{
	kfree(xfer);
}

static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
{
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	unsigned int i;

	if (!xfer)
		return;

	writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];

		cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
					      cmd->tx_len);
	}

	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];

		writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
		       master->regs + CMD1_FIFO);
		writel(cmd->cmd0, master->regs + CMD0_FIFO);
	}

	writel(readl(master->regs + CTRL) | CTRL_MCS,
	       master->regs + CTRL);
	writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
}

static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
					    u32 isr)
{
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	int i, ret = 0;
	u32 status0;

	if (!xfer)
		return;

	if (!(isr & MST_INT_CMDD_EMP))
		return;

	writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);

	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_CMDR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		struct cdns_i3c_cmd *cmd;
		u32 cmdr, rx_len, id;

		cmdr = readl(master->regs + CMDR);
		id = CMDR_CMDID(cmdr);
		if (id == CMDR_CMDID_HJACK_DISEC ||
		    id == CMDR_CMDID_HJACK_ENTDAA ||
		    WARN_ON(id >= xfer->ncmds))
			continue;

		cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
		rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
		cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
		cmd->error = CMDR_ERROR(cmdr);
	}

	for (i = 0; i < xfer->ncmds; i++) {
		switch (xfer->cmds[i].error) {
		case CMDR_NO_ERROR:
			break;

		case CMDR_DDR_PREAMBLE_ERROR:
		case CMDR_DDR_PARITY_ERROR:
		case CMDR_M0_ERROR:
		case CMDR_M1_ERROR:
		case CMDR_M2_ERROR:
		case CMDR_MST_ABORT:
		case CMDR_NACK_RESP:
		case CMDR_DDR_DROPPED:
			ret = -EIO;
			break;

		case CMDR_DDR_RX_FIFO_OVF:
		case CMDR_DDR_TX_FIFO_UNF:
			ret = -ENOSPC;
			break;

		case CMDR_INVALID_DA:
		default:
			ret = -EINVAL;
			break;
		}
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct cdns_i3c_xfer, node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	cdns_i3c_master_start_xfer_locked(master);
}

static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
				       struct cdns_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		cdns_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
					 struct cdns_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur == xfer) {
		u32 status;

		writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
		       master->regs + CTRL);
		readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
					  status & MST_STATUS0_IDLE, 10,
					  1000000);
		master->xferqueue.cur = NULL;
		writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
		       FLUSH_CMD_RESP,
		       master->regs + FLUSH_CTRL);
		writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
		writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
		       master->regs + CTRL);
	} else {
		list_del_init(&xfer->node);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
{
	switch (cmd->error) {
	case CMDR_M0_ERROR:
		return I3C_ERROR_M0;

	case CMDR_M1_ERROR:
		return I3C_ERROR_M1;

	case CMDR_M2_ERROR:
	case CMDR_NACK_RESP:
		return I3C_ERROR_M2;

	default:
		break;
	}

	return I3C_ERROR_UNKNOWN;
}

static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
					struct i3c_ccc_cmd *cmd)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_xfer *xfer;
	struct cdns_i3c_cmd *ccmd;
	int ret;

	xfer = cdns_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	ccmd = xfer->cmds;
	ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
	ccmd->cmd0 = CMD0_FIFO_IS_CCC |
		     CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);

	if (cmd->id & I3C_CCC_DIRECT)
		ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);

	if (cmd->rnw) {
		ccmd->cmd0 |= CMD0_FIFO_RNW;
		ccmd->rx_buf = cmd->dests[0].payload.data;
		ccmd->rx_len = cmd->dests[0].payload.len;
	} else {
		ccmd->tx_buf = cmd->dests[0].payload.data;
		ccmd->tx_len = cmd->dests[0].payload.len;
	}

	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);

	ret = xfer->ret;
	cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
	cdns_i3c_master_free_xfer(xfer);

	return ret;
}

static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				      struct i3c_priv_xfer *xfers,
				      int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	int txslots = 0, rxslots = 0, i, ret;
	struct cdns_i3c_xfer *cdns_xfer;

	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
			return -ENOTSUPP;
	}

	if (!nxfers)
		return 0;

	if (nxfers > master->caps.cmdfifodepth ||
	    nxfers > master->caps.cmdrfifodepth)
		return -ENOTSUPP;

	/*
	 * First make sure that all transactions (block of transfers separated
	 * by a STOP marker) fit in the FIFOs.
	 */
	for (i = 0; i < nxfers; i++) {
		if (xfers[i].rnw)
			rxslots += DIV_ROUND_UP(xfers[i].len, 4);
		else
			txslots += DIV_ROUND_UP(xfers[i].len, 4);
	}

	if (rxslots > master->caps.rxfifodepth ||
	    txslots > master->caps.txfifodepth)
		return -ENOTSUPP;

	cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	if (!cdns_xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
		u32 pl_len = xfers[i].len;

		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);

		if (xfers[i].rnw) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].data.in;
			ccmd->rx_len = xfers[i].len;
			pl_len++;
		} else {
			ccmd->tx_buf = xfers[i].data.out;
			ccmd->tx_len = xfers[i].len;
		}

		ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);

		if (i < nxfers - 1)
			ccmd->cmd0 |= CMD0_FIFO_RSBC;

		if (!i)
			ccmd->cmd0 |= CMD0_FIFO_BCH;
	}

	cdns_i3c_master_queue_xfer(master, cdns_xfer);
	if (!wait_for_completion_timeout(&cdns_xfer->comp,
					 msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, cdns_xfer);

	ret = cdns_xfer->ret;

	for (i = 0; i < nxfers; i++)
		xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);

	cdns_i3c_master_free_xfer(cdns_xfer);

	return ret;
}

static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				     struct i2c_msg *xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned int nrxwords = 0, ntxwords = 0;
	struct cdns_i3c_xfer *xfer;
	int i, ret = 0;

	if (nxfers > master->caps.cmdfifodepth)
		return -ENOTSUPP;

	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
			return -ENOTSUPP;

		if (xfers[i].flags & I2C_M_RD)
			nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
		else
			ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
	}

	if (ntxwords > master->caps.txfifodepth ||
	    nrxwords > master->caps.rxfifodepth)
		return -ENOTSUPP;

	xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];

		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
			CMD0_FIFO_PL_LEN(xfers[i].len) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);

		if (xfers[i].flags & I2C_M_TEN)
			ccmd->cmd0 |= CMD0_FIFO_IS_10B;

		if (xfers[i].flags & I2C_M_RD) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].buf;
			ccmd->rx_len = xfers[i].len;
		} else {
			ccmd->tx_buf = xfers[i].buf;
			ccmd->tx_len = xfers[i].len;
		}
	}

	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);

	ret = xfer->ret;
	cdns_i3c_master_free_xfer(xfer);

	return ret;
}

struct cdns_i3c_i2c_dev_data {
	u16 id;
	s16 ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};

static u32 prepare_rr0_dev_address(u32 addr)
{
	u32 ret = (addr << 1) & 0xff;

	/* RR0[7:1] = addr[6:0] */
	ret |= (addr & GENMASK(6, 0)) << 1;

	/* RR0[15:13] = addr[9:7] */
	ret |= (addr & GENMASK(9, 7)) << 6;

	/* RR0[0] = ~XOR(addr[6:0]) */
	ret |= parity8(addr & 0x7f) ? 0 : BIT(0);

	return ret;
}
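
/*
 * Worked example (illustrative, derived from the encoding above): for the
 * 7-bit address 0x3a, RR0[7:1] holds 0x3a and RR0[15:13] is 0; 0x3a has an
 * even number of set bits, so parity8() returns 0 and the odd-parity bit
 * RR0[0] gets set, giving prepare_rr0_dev_address(0x3a) == 0x75.
 */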

static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	u32 rr;

	rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
				     dev->info.dyn_addr :
				     dev->info.static_addr);
	writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
}

static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
				       u8 dyn_addr)
{
	unsigned long activedevs;
	u32 rr;
	int i;

	if (!dyn_addr) {
		if (!master->free_rr_slots)
			return -ENOSPC;

		return ffs(master->free_rr_slots) - 1;
	}

	activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	activedevs &= ~BIT(0);

	for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
		rr = readl(master->regs + DEV_ID_RR0(i));
		if (!(rr & DEV_ID_RR0_IS_I3C) ||
		    DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
			continue;

		return i;
	}

	return -EINVAL;
}

static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					    u8 old_dyn_addr)
{
	cdns_i3c_master_upd_i3c_addr(dev);

	return 0;
}

static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	int slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
	if (slot < 0) {
		kfree(data);
		return slot;
	}

	data->ibi = -1;
	data->id = slot;
	i3c_dev_set_master_data(dev, data);
	master->free_rr_slots &= ~BIT(slot);

	if (!dev->info.dyn_addr) {
		cdns_i3c_master_upd_i3c_addr(dev);
		writel(readl(master->regs + DEVS_CTRL) |
		       DEVS_CTRL_DEV_ACTIVE(data->id),
		       master->regs + DEVS_CTRL);
	}

	return 0;
}

static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);

	i3c_dev_set_master_data(dev, NULL);
	master->free_rr_slots |= BIT(data->id);
	kfree(data);
}

static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	int slot;

	slot = cdns_i3c_master_get_rr_slot(master, 0);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->id = slot;
	master->free_rr_slots &= ~BIT(slot);
	i2c_dev_set_master_data(dev, data);

	writel(prepare_rr0_dev_address(dev->addr),
	       master->regs + DEV_ID_RR0(data->id));
	writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_ACTIVE(data->id),
	       master->regs + DEVS_CTRL);

	return 0;
}

static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);

	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);
	master->free_rr_slots |= BIT(data->id);

	i2c_dev_set_master_data(dev, NULL);
	kfree(data);
}

static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);

	cdns_i3c_master_disable(master);
}

static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
					    unsigned int slot,
					    struct i3c_device_info *info)
{
	u32 rr;

	memset(info, 0, sizeof(*info));
	rr = readl(master->regs + DEV_ID_RR0(slot));
	info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
	rr = readl(master->regs + DEV_ID_RR2(slot));
	info->dcr = rr;
	info->bcr = rr >> 8;
	info->pid = rr >> 16;
	info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
}
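
/*
 * Note on the read-back above (descriptive only): the 48-bit provisioned ID
 * is split across two retaining registers, PID[47:16] in DEV_ID_RR1 and
 * PID[15:0] in the upper half of DEV_ID_RR2 (see DEV_ID_RR2_PID_LSB()),
 * which is why RR1 is shifted left by 16 bits and ORed into the value
 * extracted from RR2[31:16].
 */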

static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
{
	struct i3c_master_controller *m = &master->base;
	unsigned long i3c_lim_period, pres_step, ncycles;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	unsigned long new_i3c_scl_lim = 0;
	struct i3c_dev_desc *dev;
	u32 prescl1, ctrl;

	i3c_bus_for_each_i3cdev(bus, dev) {
		unsigned long max_fscl;

		max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
			       I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
		switch (max_fscl) {
		case I3C_SDR1_FSCL_8MHZ:
			max_fscl = 8000000;
			break;
		case I3C_SDR2_FSCL_6MHZ:
			max_fscl = 6000000;
			break;
		case I3C_SDR3_FSCL_4MHZ:
			max_fscl = 4000000;
			break;
		case I3C_SDR4_FSCL_2MHZ:
			max_fscl = 2000000;
			break;
		case I3C_SDR0_FSCL_MAX:
		default:
			max_fscl = 0;
			break;
		}

		if (max_fscl &&
		    (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
			new_i3c_scl_lim = max_fscl;
	}

	/* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
	if (new_i3c_scl_lim == master->i3c_scl_lim)
		return;
	master->i3c_scl_lim = new_i3c_scl_lim;
	if (!new_i3c_scl_lim)
		return;
	pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);

	/* Configure PP_LOW to meet I3C slave limitations. */
	prescl1 = readl(master->regs + PRESCL_CTRL1) &
		  ~PRESCL_CTRL1_PP_LOW_MASK;
	ctrl = readl(master->regs + CTRL);

	i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
	ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
	if (ncycles < 4)
		ncycles = 0;
	else
		ncycles -= 4;

	prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);

	/* Disable I3C master before updating PRESCL_CTRL1. */
	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_disable(master);

	writel(prescl1, master->regs + PRESCL_CTRL1);

	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_enable(master);
}

static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned long olddevs, newdevs;
	int ret, slot;
	u8 addrs[MAX_DEVS] = { };
	u8 last_addr = 0;

	olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	olddevs |= BIT(0);

	/* Prepare RR slots before launching DAA. */
	for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
		ret = i3c_master_get_free_addr(m, last_addr + 1);
		if (ret < 0)
			return -ENOSPC;

		last_addr = ret;
		addrs[slot] = last_addr;
		writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
		       master->regs + DEV_ID_RR0(slot));
		writel(0, master->regs + DEV_ID_RR1(slot));
		writel(0, master->regs + DEV_ID_RR2(slot));
	}

	ret = i3c_master_entdaa_locked(&master->base);
	if (ret && ret != I3C_ERROR_M2)
		return ret;

	newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	newdevs &= ~olddevs;

	/*
	 * Clear all retaining registers filled during DAA. We already
	 * have the addresses assigned to them in the addrs array.
	 */
	for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
		i3c_master_add_i3c_dev_locked(m, addrs[slot]);

	/*
	 * Clear slots that ended up not being used. Can be caused by I3C
	 * device creation failure or when the I3C device was already known
	 * by the system but with a different address (in this case the device
	 * already has a slot and does not need a new one).
	 */
	writel(readl(master->regs + DEVS_CTRL) |
	       master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
	       master->regs + DEVS_CTRL);

	i3c_master_defslvs_locked(&master->base);

	cdns_i3c_master_upd_i3c_scl_lim(master);

	/* Unmask Hot-Join and Mastership request interrupts. */
	i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
			       I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);

	return 0;
}

static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
{
	unsigned long sysclk_rate = clk_get_rate(master->sysclk);
	u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
				    (NSEC_PER_SEC / sysclk_rate));

	/* Values greater than 3 are not valid. */
	if (thd_delay > THD_DELAY_MAX)
		thd_delay = THD_DELAY_MAX;

	/* The CTRL_THD_DELAY value is encoded. */
	return (THD_DELAY_MAX - thd_delay);
}

static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned long pres_step, sysclk_rate, max_i2cfreq;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	u32 ctrl, prescl0, prescl1, pres, low;
	struct i3c_device_info info = { };
	int ret, ncycles;

	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		ctrl = CTRL_PURE_BUS_MODE;
		break;

	case I3C_BUS_MODE_MIXED_FAST:
		ctrl = CTRL_MIXED_FAST_BUS_MODE;
		break;

	case I3C_BUS_MODE_MIXED_SLOW:
		ctrl = CTRL_MIXED_SLOW_BUS_MODE;
		break;

	default:
		return -EINVAL;
	}

	sysclk_rate = clk_get_rate(master->sysclk);
	if (!sysclk_rate)
		return -EINVAL;

	pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
	if (pres > PRESCL_CTRL0_I3C_MAX)
		return -ERANGE;

	bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);

	prescl0 = PRESCL_CTRL0_I3C(pres);

	low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2;
	prescl1 = PRESCL_CTRL1_OD_LOW(low);

	max_i2cfreq = bus->scl_rate.i2c;

	pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
	if (pres > PRESCL_CTRL0_I2C_MAX)
		return -ERANGE;

	bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);

	prescl0 |= PRESCL_CTRL0_I2C(pres);
	writel(prescl0, master->regs + PRESCL_CTRL0);

	/* Calculate OD and PP low. */
	pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
	ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
	if (ncycles < 0)
		ncycles = 0;
	prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
	writel(prescl1, master->regs + PRESCL_CTRL1);
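
	/*
	 * Worked example (illustrative numbers only): with sysclk running at
	 * 100 MHz and a requested I3C rate of 12.5 MHz, pres =
	 * DIV_ROUND_UP(100000000, 12500000 * 4) - 1 = 1, which gives SCL =
	 * 100000000 / ((1 + 1) * 4) = 12.5 MHz; for a 400 kHz I2C rate the
	 * divide-by-5 path yields pres = 49 and SCL = 400 kHz exactly.
	 */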

	/* Get an address for the master. */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;

	writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
	       master->regs + DEV_ID_RR0(0));

	cdns_i3c_master_dev_rr_to_info(master, 0, &info);
	if (info.bcr & I3C_BCR_HDR_CAP)
		info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		return ret;

	/*
	 * Enable Hot-Join, and, when a Hot-Join request happens, disable all
	 * events coming from this device.
	 *
	 * We will issue ENTDAA afterwards from the threaded IRQ handler.
	 */
	ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;

	/*
	 * Configure data hold delay based on device-specific data.
	 *
	 * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on
	 * master output. This setting makes it possible to meet this timing
	 * on the master's SoC outputs, regardless of PCB balancing.
	 */
	ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
	writel(ctrl, master->regs + CTRL);

	cdns_i3c_master_enable(master);

	return 0;
}

static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
				       u32 ibir)
{
	struct cdns_i3c_i2c_dev_data *data;
	bool data_consumed = false;
	struct i3c_ibi_slot *slot;
	u32 id = IBIR_SLVID(ibir);
	struct i3c_dev_desc *dev;
	size_t nbytes;
	u8 *buf;

	/*
	 * FIXME: maybe we should report the FIFO OVF errors to the upper
	 * layer.
	 */
	if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
		goto out;

	dev = master->ibi.slots[id];
	spin_lock(&master->ibi.lock);

	data = i3c_dev_get_master_data(dev);
	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot)
		goto out_unlock;

	buf = slot->data;

	nbytes = IBIR_XFER_BYTES(ibir);
	readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);

		memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
	}

	slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
			  dev->ibi->max_payload_len);
	i3c_master_queue_ibi(dev, slot);
	data_consumed = true;

out_unlock:
	spin_unlock(&master->ibi.lock);

out:
	/* Consume data from the FIFO if it's not been done already. */
	if (!data_consumed) {
		int i;

		for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
			readl(master->regs + IBI_DATA_FIFO);
	}
}

static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
{
	u32 status0;

	writel(MST_INT_IBIR_THR, master->regs + MST_ICR);

	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_IBIR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		u32 ibir = readl(master->regs + IBIR);

		switch (IBIR_TYPE(ibir)) {
		case IBIR_TYPE_IBI:
			cdns_i3c_master_handle_ibi(master, ibir);
			break;

		case IBIR_TYPE_HJ:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			queue_work(master->base.wq, &master->hj_work);
			break;

		case IBIR_TYPE_MR:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			break;

		default:
			break;
		}
	}
}

static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
{
	struct cdns_i3c_master *master = data;
	u32 status;

	status = readl(master->regs + MST_ISR);
	if (!(status & readl(master->regs + MST_IMR)))
		return IRQ_NONE;

	spin_lock(&master->xferqueue.lock);
	cdns_i3c_master_end_xfer_locked(master, status);
	spin_unlock(&master->xferqueue.lock);

	if (status & MST_INT_IBIR_THR)
		cnds_i3c_master_demux_ibis(master);

	return IRQ_HANDLED;
}

static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	u32 sirmap;
	int ret;

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
				      I3C_CCC_EVENT_SIR);
	if (ret)
		return ret;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sirmap |= SIR_MAP_DEV_CONF(data->ibi,
				   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	return ret;
}
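
/*
 * Note on the SIR_MAP layout used above and in cdns_i3c_master_enable_ibi()
 * below (illustrative): each 32-bit SIR_MAP word holds two 16-bit device
 * configurations, so for data->ibi == 3, SIR_MAP_DEV_REG(3) resolves to
 * SIR_MAP(1) (offset 0x184) and the configuration occupies bits 31:16 of
 * that word.
 */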

static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	u32 sircfg, sirmap;
	int ret;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
		 SIR_MAP_DEV_DA(dev->info.dyn_addr) |
		 SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
		 SIR_MAP_DEV_ACK;

	if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
		sircfg |= SIR_MAP_DEV_SLOW;

	sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
				     I3C_CCC_EVENT_SIR);
	if (ret) {
		spin_lock_irqsave(&master->ibi.lock, flags);
		sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
		sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
		sirmap |= SIR_MAP_DEV_CONF(data->ibi,
					   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
		writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
		spin_unlock_irqrestore(&master->ibi.lock, flags);
	}

	return ret;
}

static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	unsigned int i;

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			data->ibi = i;
			master->ibi.slots[i] = dev;
			break;
		}
	}
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	if (i < master->ibi.num_slots)
		return 0;

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;

	return -ENOSPC;
}

static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	data->ibi = -1;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
}

static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					     struct i3c_ibi_slot *slot)
{
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}

static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
	.bus_init = cdns_i3c_master_bus_init,
	.bus_cleanup = cdns_i3c_master_bus_cleanup,
	.do_daa = cdns_i3c_master_do_daa,
	.attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
	.reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
	.detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
	.attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
	.supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
	.priv_xfers = cdns_i3c_master_priv_xfers,
	.i2c_xfers = cdns_i3c_master_i2c_xfers,
	.enable_ibi = cdns_i3c_master_enable_ibi,
	.disable_ibi = cdns_i3c_master_disable_ibi,
	.request_ibi = cdns_i3c_master_request_ibi,
	.free_ibi = cdns_i3c_master_free_ibi,
	.recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
};

static void cdns_i3c_master_hj(struct work_struct *work)
{
	struct cdns_i3c_master *master = container_of(work,
						      struct cdns_i3c_master,
						      hj_work);

	i3c_master_do_daa(&master->base);
}

static struct cdns_i3c_data cdns_i3c_devdata = {
	.thd_delay_ns = 10,
};

static const struct of_device_id cdns_i3c_master_of_ids[] = {
	{ .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids);

static int cdns_i3c_master_probe(struct platform_device *pdev)
{
	struct cdns_i3c_master *master;
	int ret, irq;
	u32 val;

	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->devdata = of_device_get_match_data(&pdev->dev);
	if (!master->devdata)
		return -EINVAL;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(master->pclk))
		return PTR_ERR(master->pclk);

	master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
	if (IS_ERR(master->sysclk))
		return PTR_ERR(master->sysclk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = clk_prepare_enable(master->pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(master->sysclk);
	if (ret)
		goto err_disable_pclk;

	if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
		ret = -EINVAL;
		goto err_disable_sysclk;
	}

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
	writel(0xffffffff, master->regs + MST_IDR);
	writel(0xffffffff, master->regs + SLV_IDR);
	ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
			       dev_name(&pdev->dev), master);
	if (ret)
		goto err_disable_sysclk;

	platform_set_drvdata(pdev, master);

	val = readl(master->regs + CONF_STATUS0);

	/* Device ID0 is reserved to describe this master. */
*/ 1625 master->maxdevs = CONF_STATUS0_DEVS_NUM(val); 1626 master->free_rr_slots = GENMASK(master->maxdevs, 1); 1627 master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val); 1628 master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val); 1629 1630 val = readl(master->regs + CONF_STATUS1); 1631 master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val); 1632 master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val); 1633 master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val); 1634 1635 spin_lock_init(&master->ibi.lock); 1636 master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val); 1637 master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots, 1638 sizeof(*master->ibi.slots), 1639 GFP_KERNEL); 1640 if (!master->ibi.slots) { 1641 ret = -ENOMEM; 1642 goto err_disable_sysclk; 1643 } 1644 1645 writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL); 1646 writel(MST_INT_IBIR_THR, master->regs + MST_IER); 1647 writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL); 1648 1649 ret = i3c_master_register(&master->base, &pdev->dev, 1650 &cdns_i3c_master_ops, false); 1651 if (ret) 1652 goto err_disable_sysclk; 1653 1654 return 0; 1655 1656 err_disable_sysclk: 1657 clk_disable_unprepare(master->sysclk); 1658 1659 err_disable_pclk: 1660 clk_disable_unprepare(master->pclk); 1661 1662 return ret; 1663 } 1664 1665 static void cdns_i3c_master_remove(struct platform_device *pdev) 1666 { 1667 struct cdns_i3c_master *master = platform_get_drvdata(pdev); 1668 1669 cancel_work_sync(&master->hj_work); 1670 i3c_master_unregister(&master->base); 1671 1672 clk_disable_unprepare(master->sysclk); 1673 clk_disable_unprepare(master->pclk); 1674 } 1675 1676 static struct platform_driver cdns_i3c_master = { 1677 .probe = cdns_i3c_master_probe, 1678 .remove = cdns_i3c_master_remove, 1679 .driver = { 1680 .name = "cdns-i3c-master", 1681 .of_match_table = cdns_i3c_master_of_ids, 1682 }, 1683 }; 1684 module_platform_driver(cdns_i3c_master); 1685 1686 MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>"); 1687 MODULE_DESCRIPTION("Cadence I3C master driver"); 1688 MODULE_LICENSE("GPL v2"); 1689 MODULE_ALIAS("platform:cdns-i3c-master"); 1690