1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3 
4 #include <linux/acpi.h>
5 #include <linux/delay.h>
6 #include <linux/mod_devicetable.h>
7 #include <linux/pm_runtime.h>
8 #include <linux/soundwire/sdw_registers.h>
9 #include <linux/soundwire/sdw.h>
10 #include <linux/soundwire/sdw_type.h>
11 #include <linux/string_choices.h>
12 #include "bus.h"
13 #include "irq.h"
14 #include "sysfs_local.h"
15 
16 static DEFINE_IDA(sdw_bus_ida);
17 
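/*
 * Allocate a unique bus id from the IDA; if the parent driver did not set a
 * controller_id, default it to the newly allocated bus id.
 */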
18 static int sdw_get_id(struct sdw_bus *bus)
19 {
20 	int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);
21 
22 	if (rc < 0)
23 		return rc;
24 
25 	bus->id = rc;
26 
27 	if (bus->controller_id == -1)
28 		bus->controller_id = rc;
29 
30 	return 0;
31 }
32 
33 /**
34  * sdw_bus_master_add() - add a bus Master instance
35  * @bus: bus instance
36  * @parent: parent device
37  * @fwnode: firmware node handle
38  *
39  * Initializes the bus instance, reads properties and creates child
40  * devices.
41  */
42 int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
43 		       struct fwnode_handle *fwnode)
44 {
45 	struct sdw_master_prop *prop = NULL;
46 	int ret;
47 
48 	if (!parent) {
49 		pr_err("SoundWire parent device is not set\n");
50 		return -ENODEV;
51 	}
52 
53 	ret = sdw_get_id(bus);
54 	if (ret < 0) {
55 		dev_err(parent, "Failed to get bus id\n");
56 		return ret;
57 	}
58 
59 	ret = sdw_master_device_add(bus, parent, fwnode);
60 	if (ret < 0) {
61 		dev_err(parent, "Failed to add master device at link %d\n",
62 			bus->link_id);
63 		return ret;
64 	}
65 
66 	if (!bus->ops) {
67 		dev_err(bus->dev, "SoundWire Bus ops are not set\n");
68 		return -EINVAL;
69 	}
70 
71 	if (!bus->compute_params) {
72 		dev_err(bus->dev,
73 			"Bandwidth allocation not configured, compute_params not set\n");
74 		return -EINVAL;
75 	}
76 
77 	/*
78 	 * Give each bus_lock and msg_lock a unique key so that lockdep won't
79 	 * trigger a deadlock warning when the locks of several buses are
80 	 * grabbed during configuration of a multi-bus stream.
81 	 */
82 	lockdep_register_key(&bus->msg_lock_key);
83 	__mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);
84 
85 	lockdep_register_key(&bus->bus_lock_key);
86 	__mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);
87 
88 	INIT_LIST_HEAD(&bus->slaves);
89 	INIT_LIST_HEAD(&bus->m_rt_list);
90 
91 	/*
92 	 * Initialize multi_link flag
93 	 */
94 	bus->multi_link = false;
95 	if (bus->ops->read_prop) {
96 		ret = bus->ops->read_prop(bus);
97 		if (ret < 0) {
98 			dev_err(bus->dev,
99 				"Bus read properties failed:%d\n", ret);
100 			return ret;
101 		}
102 	}
103 
104 	sdw_bus_debugfs_init(bus);
105 
106 	/*
107 	 * Device numbers in SoundWire are 0 through 15. Enumeration device
108 	 * number (0), Broadcast device number (15), Group numbers (12 and
109 	 * 13) and Master device number (14) are not used for assignment so
110 	 * mask these and other higher bits.
111 	 */
112 
113 	/* Set higher order bits */
114 	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);
115 
116 	/* Set enumeration device number and broadcast device number */
117 	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
118 	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);
119 
120 	/* Set group device numbers and master device number */
121 	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
122 	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
123 	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
124 
125 	ret = sdw_irq_create(bus, fwnode);
126 	if (ret)
127 		return ret;
128 
129 	/*
130 	 * SDW is an enumerable bus, but devices can be powered off, so
131 	 * they won't be able to report as present.
132 	 *
133 	 * Create Slave devices based on Slaves described in
134 	 * the respective firmware (ACPI/DT)
135 	 */
136 	if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
137 		ret = sdw_acpi_find_slaves(bus);
138 	else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
139 		ret = sdw_of_find_slaves(bus);
140 	else
141 		ret = -ENOTSUPP; /* No ACPI/DT so error out */
142 
143 	if (ret < 0) {
144 		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
145 		sdw_irq_delete(bus);
146 		return ret;
147 	}
148 
149 	/*
150 	 * Initialize clock values based on Master properties. The max
151 	 * frequency is read from max_clk_freq property. Current assumption
152 	 * is that the bus will start at highest clock frequency when
153 	 * powered on.
154 	 *
155 	 * Default active bank will be 0 as out of reset the Slaves have
156 	 * to start with bank 0 (Table 40 of Spec)
157 	 */
158 	prop = &bus->prop;
159 	bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
160 	bus->params.curr_dr_freq = bus->params.max_dr_freq;
161 	bus->params.curr_bank = SDW_BANK0;
162 	bus->params.next_bank = SDW_BANK1;
163 
164 	return 0;
165 }
166 EXPORT_SYMBOL(sdw_bus_master_add);
167 
168 static int sdw_delete_slave(struct device *dev, void *data)
169 {
170 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
171 	struct sdw_bus *bus = slave->bus;
172 
173 	pm_runtime_disable(dev);
174 
175 	sdw_slave_debugfs_exit(slave);
176 
177 	mutex_lock(&bus->bus_lock);
178 
179 	if (slave->dev_num) { /* clear dev_num if assigned */
180 		clear_bit(slave->dev_num, bus->assigned);
181 		if (bus->ops && bus->ops->put_device_num)
182 			bus->ops->put_device_num(bus, slave);
183 	}
184 	list_del_init(&slave->node);
185 	mutex_unlock(&bus->bus_lock);
186 
187 	device_unregister(dev);
188 	return 0;
189 }
190 
191 /**
192  * sdw_bus_master_delete() - delete the bus master instance
193  * @bus: bus to be deleted
194  *
195  * Remove the instance, delete the child devices.
196  */
197 void sdw_bus_master_delete(struct sdw_bus *bus)
198 {
199 	device_for_each_child(bus->dev, NULL, sdw_delete_slave);
200 
201 	sdw_irq_delete(bus);
202 
203 	sdw_master_device_del(bus);
204 
205 	sdw_bus_debugfs_exit(bus);
206 	lockdep_unregister_key(&bus->bus_lock_key);
207 	lockdep_unregister_key(&bus->msg_lock_key);
208 	ida_free(&sdw_bus_ida, bus->id);
209 }
210 EXPORT_SYMBOL(sdw_bus_master_delete);
211 
212 /*
213  * SDW IO Calls
214  */
215 
216 static inline int find_response_code(enum sdw_command_response resp)
217 {
218 	switch (resp) {
219 	case SDW_CMD_OK:
220 		return 0;
221 
222 	case SDW_CMD_IGNORED:
223 		return -ENODATA;
224 
225 	case SDW_CMD_TIMEOUT:
226 		return -ETIMEDOUT;
227 
228 	default:
229 		return -EIO;
230 	}
231 }
232 
233 static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
234 {
235 	int retry = bus->prop.err_threshold;
236 	enum sdw_command_response resp;
237 	int ret = 0, i;
238 
239 	for (i = 0; i <= retry; i++) {
240 		resp = bus->ops->xfer_msg(bus, msg);
241 		ret = find_response_code(resp);
242 
243 		/* if cmd is ok or ignored return */
244 		if (ret == 0 || ret == -ENODATA)
245 			return ret;
246 	}
247 
248 	return ret;
249 }
250 
251 static inline int do_transfer_defer(struct sdw_bus *bus,
252 				    struct sdw_msg *msg)
253 {
254 	struct sdw_defer *defer = &bus->defer_msg;
255 	int retry = bus->prop.err_threshold;
256 	enum sdw_command_response resp;
257 	int ret = 0, i;
258 
259 	defer->msg = msg;
260 	defer->length = msg->len;
261 	init_completion(&defer->complete);
262 
263 	for (i = 0; i <= retry; i++) {
264 		resp = bus->ops->xfer_msg_defer(bus);
265 		ret = find_response_code(resp);
266 		/* if cmd is ok or ignored return */
267 		if (ret == 0 || ret == -ENODATA)
268 			return ret;
269 	}
270 
271 	return ret;
272 }
273 
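/*
 * Perform the message transfer without taking bus->msg_lock; the caller is
 * responsible for serializing access to the bus.
 */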
274 static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
275 {
276 	int ret;
277 
278 	ret = do_transfer(bus, msg);
279 	if (ret != 0 && ret != -ENODATA)
280 		dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
281 			msg->dev_num, ret,
282 			str_write_read(msg->flags & SDW_MSG_FLAG_WRITE),
283 			msg->addr, msg->len);
284 
285 	return ret;
286 }
287 
288 /**
289  * sdw_transfer() - Synchronous transfer message to a SDW Slave device
290  * @bus: SDW bus
291  * @msg: SDW message to be xfered
292  */
293 int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
294 {
295 	int ret;
296 
297 	mutex_lock(&bus->msg_lock);
298 
299 	ret = sdw_transfer_unlocked(bus, msg);
300 
301 	mutex_unlock(&bus->msg_lock);
302 
303 	return ret;
304 }
305 
306 /**
307  * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
308  * @bus: SDW bus
309  * @sync_delay: Delay before reading status
310  */
311 void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
312 {
313 	u32 status;
314 
315 	if (!bus->ops->read_ping_status)
316 		return;
317 
318 	/*
319 	 * wait for peripheral to sync if desired. 10-15ms should be more than
320 	 * enough in most cases.
321 	 */
322 	if (sync_delay)
323 		usleep_range(10000, 15000);
324 
325 	mutex_lock(&bus->msg_lock);
326 
327 	status = bus->ops->read_ping_status(bus);
328 
329 	mutex_unlock(&bus->msg_lock);
330 
331 	if (!status)
332 		dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
333 	else
334 		dev_dbg(bus->dev, "PING status: %#x\n", status);
335 }
336 EXPORT_SYMBOL(sdw_show_ping_status);
337 
338 /**
339  * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
340  * @bus: SDW bus
341  * @msg: SDW message to be xfered
342  *
343  * Caller needs to hold the msg_lock while calling this function
344  */
345 int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
346 {
347 	int ret;
348 
349 	if (!bus->ops->xfer_msg_defer)
350 		return -ENOTSUPP;
351 
352 	ret = do_transfer_defer(bus, msg);
353 	if (ret != 0 && ret != -ENODATA)
354 		dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
355 			msg->dev_num, ret);
356 
357 	return ret;
358 }
359 
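/*
 * Fill a message structure for a transfer of @count bytes at register @addr
 * on device @dev_num. Addresses below SDW_REG_NO_PAGE need no paging,
 * addresses in the optional-page range only use paging when the Slave
 * supports it, and higher addresses require paging support on an enumerated
 * Slave.
 */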
360 int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
361 		 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
362 {
363 	memset(msg, 0, sizeof(*msg));
364 	msg->addr = addr; /* addr is 16 bit and truncated here */
365 	msg->len = count;
366 	msg->dev_num = dev_num;
367 	msg->flags = flags;
368 	msg->buf = buf;
369 
370 	if (addr < SDW_REG_NO_PAGE) /* no paging area */
371 		return 0;
372 
373 	if (addr >= SDW_REG_MAX) { /* illegal addr */
374 		pr_err("SDW: Invalid address %x passed\n", addr);
375 		return -EINVAL;
376 	}
377 
378 	if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
379 		if (slave && !slave->prop.paging_support)
380 			return 0;
381 		/* no need for else as that will fall-through to paging */
382 	}
383 
384 	/* paging mandatory */
385 	if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
386 		pr_err("SDW: Invalid device for paging :%d\n", dev_num);
387 		return -EINVAL;
388 	}
389 
390 	if (!slave) {
391 		pr_err("SDW: No slave for paging addr\n");
392 		return -EINVAL;
393 	}
394 
395 	if (!slave->prop.paging_support) {
396 		dev_err(&slave->dev,
397 			"address %x needs paging but no support\n", addr);
398 		return -EINVAL;
399 	}
400 
401 	msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
402 	msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
403 	msg->addr |= BIT(15);
404 	msg->page = true;
405 
406 	return 0;
407 }
408 
409 /*
410  * Read/Write IO functions.
411  */
412 
413 static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
414 			       size_t count, u8 *val)
415 {
416 	struct sdw_msg msg;
417 	size_t size;
418 	int ret;
419 
420 	while (count) {
421 		// Only handle bytes up to next page boundary
422 		size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));
423 
424 		ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
425 		if (ret < 0)
426 			return ret;
427 
428 		ret = sdw_transfer(slave->bus, &msg);
429 		if (ret < 0 && !slave->is_mockup_device)
430 			return ret;
431 
432 		addr += size;
433 		val += size;
434 		count -= size;
435 	}
436 
437 	return 0;
438 }
439 
440 /**
441  * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
442  * @slave: SDW Slave
443  * @addr: Register address
444  * @count: length
445  * @val: Buffer for values to be read
446  *
447  * Note that if the message crosses a page boundary each page will be
448  * transferred under a separate invocation of the msg_lock.
449  */
450 int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
451 {
452 	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
453 }
454 EXPORT_SYMBOL(sdw_nread_no_pm);
455 
456 /**
457  * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
458  * @slave: SDW Slave
459  * @addr: Register address
460  * @count: length
461  * @val: Buffer for values to be written
462  *
463  * Note that if the message crosses a page boundary each page will be
464  * transferred under a separate invocation of the msg_lock.
465  */
466 int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
467 {
468 	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
469 }
470 EXPORT_SYMBOL(sdw_nwrite_no_pm);
471 
472 /**
473  * sdw_write_no_pm() - Write a SDW Slave register with no PM
474  * @slave: SDW Slave
475  * @addr: Register address
476  * @value: Register value
477  */
478 int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
479 {
480 	return sdw_nwrite_no_pm(slave, addr, 1, &value);
481 }
482 EXPORT_SYMBOL(sdw_write_no_pm);
483 
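/*
 * Single-byte read/write helpers addressed by device number only (no
 * struct sdw_slave), e.g. for broadcast or not-yet-enumerated devices.
 */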
484 static int
485 sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
486 {
487 	struct sdw_msg msg;
488 	u8 buf;
489 	int ret;
490 
491 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
492 			   SDW_MSG_FLAG_READ, &buf);
493 	if (ret < 0)
494 		return ret;
495 
496 	ret = sdw_transfer(bus, &msg);
497 	if (ret < 0)
498 		return ret;
499 
500 	return buf;
501 }
502 
503 static int
504 sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
505 {
506 	struct sdw_msg msg;
507 	int ret;
508 
509 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
510 			   SDW_MSG_FLAG_WRITE, &value);
511 	if (ret < 0)
512 		return ret;
513 
514 	return sdw_transfer(bus, &msg);
515 }
516 
517 int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
518 {
519 	struct sdw_msg msg;
520 	u8 buf;
521 	int ret;
522 
523 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
524 			   SDW_MSG_FLAG_READ, &buf);
525 	if (ret < 0)
526 		return ret;
527 
528 	ret = sdw_transfer_unlocked(bus, &msg);
529 	if (ret < 0)
530 		return ret;
531 
532 	return buf;
533 }
534 EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);
535 
536 int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
537 {
538 	struct sdw_msg msg;
539 	int ret;
540 
541 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
542 			   SDW_MSG_FLAG_WRITE, &value);
543 	if (ret < 0)
544 		return ret;
545 
546 	return sdw_transfer_unlocked(bus, &msg);
547 }
548 EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
549 
550 /**
551  * sdw_read_no_pm() - Read a SDW Slave register with no PM
552  * @slave: SDW Slave
553  * @addr: Register address
554  */
555 int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
556 {
557 	u8 buf;
558 	int ret;
559 
560 	ret = sdw_nread_no_pm(slave, addr, 1, &buf);
561 	if (ret < 0)
562 		return ret;
563 	else
564 		return buf;
565 }
566 EXPORT_SYMBOL(sdw_read_no_pm);
567 
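/* Read-Modify-Write Slave register, without taking a PM reference */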
568 int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
569 {
570 	int tmp;
571 
572 	tmp = sdw_read_no_pm(slave, addr);
573 	if (tmp < 0)
574 		return tmp;
575 
576 	tmp = (tmp & ~mask) | val;
577 	return sdw_write_no_pm(slave, addr, tmp);
578 }
579 EXPORT_SYMBOL(sdw_update_no_pm);
580 
581 /* Read-Modify-Write Slave register */
582 int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
583 {
584 	int tmp;
585 
586 	tmp = sdw_read(slave, addr);
587 	if (tmp < 0)
588 		return tmp;
589 
590 	tmp = (tmp & ~mask) | val;
591 	return sdw_write(slave, addr, tmp);
592 }
593 EXPORT_SYMBOL(sdw_update);
594 
595 /**
596  * sdw_nread() - Read "n" contiguous SDW Slave registers
597  * @slave: SDW Slave
598  * @addr: Register address
599  * @count: length
600  * @val: Buffer for values to be read
601  *
602  * This version of the function will take a PM reference to the slave
603  * device.
604  * Note that if the message crosses a page boundary each page will be
605  * transferred under a separate invocation of the msg_lock.
606  */
607 int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
608 {
609 	int ret;
610 
611 	ret = pm_runtime_get_sync(&slave->dev);
612 	if (ret < 0 && ret != -EACCES) {
613 		pm_runtime_put_noidle(&slave->dev);
614 		return ret;
615 	}
616 
617 	ret = sdw_nread_no_pm(slave, addr, count, val);
618 
619 	pm_runtime_mark_last_busy(&slave->dev);
620 	pm_runtime_put(&slave->dev);
621 
622 	return ret;
623 }
624 EXPORT_SYMBOL(sdw_nread);
625 
626 /**
627  * sdw_nwrite() - Write "n" contiguous SDW Slave registers
628  * @slave: SDW Slave
629  * @addr: Register address
630  * @count: length
631  * @val: Buffer for values to be written
632  *
633  * This version of the function will take a PM reference to the slave
634  * device.
635  * Note that if the message crosses a page boundary each page will be
636  * transferred under a separate invocation of the msg_lock.
637  */
638 int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
639 {
640 	int ret;
641 
642 	ret = pm_runtime_get_sync(&slave->dev);
643 	if (ret < 0 && ret != -EACCES) {
644 		pm_runtime_put_noidle(&slave->dev);
645 		return ret;
646 	}
647 
648 	ret = sdw_nwrite_no_pm(slave, addr, count, val);
649 
650 	pm_runtime_mark_last_busy(&slave->dev);
651 	pm_runtime_put(&slave->dev);
652 
653 	return ret;
654 }
655 EXPORT_SYMBOL(sdw_nwrite);
656 
657 /**
658  * sdw_read() - Read a SDW Slave register
659  * @slave: SDW Slave
660  * @addr: Register address
661  *
662  * This version of the function will take a PM reference to the slave
663  * device.
664  */
665 int sdw_read(struct sdw_slave *slave, u32 addr)
666 {
667 	u8 buf;
668 	int ret;
669 
670 	ret = sdw_nread(slave, addr, 1, &buf);
671 	if (ret < 0)
672 		return ret;
673 
674 	return buf;
675 }
676 EXPORT_SYMBOL(sdw_read);
677 
678 /**
679  * sdw_write() - Write a SDW Slave register
680  * @slave: SDW Slave
681  * @addr: Register address
682  * @value: Register value
683  *
684  * This version of the function will take a PM reference to the slave
685  * device.
686  */
687 int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
688 {
689 	return sdw_nwrite(slave, addr, 1, &value);
690 }
691 EXPORT_SYMBOL(sdw_write);
692 
693 /*
694  * SDW alert handling
695  */
696 
697 /* called with bus_lock held */
698 static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
699 {
700 	struct sdw_slave *slave;
701 
702 	list_for_each_entry(slave, &bus->slaves, node) {
703 		if (slave->dev_num == i)
704 			return slave;
705 	}
706 
707 	return NULL;
708 }
709 
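/*
 * Compare a Slave's enumeration ID with an expected ID; the unique_id is
 * only checked when it is not marked as ignored in the Slave description.
 */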
710 int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
711 {
712 	if (slave->id.mfg_id != id.mfg_id ||
713 	    slave->id.part_id != id.part_id ||
714 	    slave->id.class_id != id.class_id ||
715 	    (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
716 	     slave->id.unique_id != id.unique_id))
717 		return -ENODEV;
718 
719 	return 0;
720 }
721 EXPORT_SYMBOL(sdw_compare_devid);
722 
723 /* called with bus_lock held */
724 static int sdw_get_device_num(struct sdw_slave *slave)
725 {
726 	struct sdw_bus *bus = slave->bus;
727 	int bit;
728 
729 	if (bus->ops && bus->ops->get_device_num) {
730 		bit = bus->ops->get_device_num(bus, slave);
731 		if (bit < 0)
732 			goto err;
733 	} else {
734 		bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
735 		if (bit == SDW_MAX_DEVICES) {
736 			bit = -ENODEV;
737 			goto err;
738 		}
739 	}
740 
741 	/*
742 	 * Do not update dev_num in the Slave data structure here;
743 	 * update it only once programming the dev_num has succeeded.
744 	 */
745 	set_bit(bit, bus->assigned);
746 
747 err:
748 	return bit;
749 }
750 
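/*
 * Program a device number into a Slave that reported on device 0: reuse the
 * sticky number from a previous enumeration if there is one, otherwise
 * allocate a new one, then write it to SDW_SCP_DEVNUMBER.
 */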
751 static int sdw_assign_device_num(struct sdw_slave *slave)
752 {
753 	struct sdw_bus *bus = slave->bus;
754 	int ret, dev_num;
755 	bool new_device = false;
756 
757 	/* check first if device number is assigned, if so reuse that */
758 	if (!slave->dev_num) {
759 		if (!slave->dev_num_sticky) {
760 			mutex_lock(&slave->bus->bus_lock);
761 			dev_num = sdw_get_device_num(slave);
762 			mutex_unlock(&slave->bus->bus_lock);
763 			if (dev_num < 0) {
764 				dev_err(bus->dev, "Get dev_num failed: %d\n",
765 					dev_num);
766 				return dev_num;
767 			}
768 			slave->dev_num = dev_num;
769 			slave->dev_num_sticky = dev_num;
770 			new_device = true;
771 		} else {
772 			slave->dev_num = slave->dev_num_sticky;
773 		}
774 	}
775 
776 	if (!new_device)
777 		dev_dbg(bus->dev,
778 			"Slave already registered, reusing dev_num:%d\n",
779 			slave->dev_num);
780 
781 	/* Clear the slave->dev_num to transfer message on device 0 */
782 	dev_num = slave->dev_num;
783 	slave->dev_num = 0;
784 
785 	ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
786 	if (ret < 0) {
787 		dev_err(bus->dev, "Program device_num %d failed: %d\n",
788 			dev_num, ret);
789 		return ret;
790 	}
791 
792 	/* After xfer of msg, restore dev_num */
793 	slave->dev_num = slave->dev_num_sticky;
794 
795 	if (bus->ops && bus->ops->new_peripheral_assigned)
796 		bus->ops->new_peripheral_assigned(bus, slave, dev_num);
797 
798 	return 0;
799 }
800 
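/*
 * Extract the SoundWire version, unique id, manufacturer, part and class ids
 * from the 48-bit value read from the SCP_DevId registers.
 */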
801 void sdw_extract_slave_id(struct sdw_bus *bus,
802 			  u64 addr, struct sdw_slave_id *id)
803 {
804 	dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
805 
806 	id->sdw_version = SDW_VERSION(addr);
807 	id->unique_id = SDW_UNIQUE_ID(addr);
808 	id->mfg_id = SDW_MFG_ID(addr);
809 	id->part_id = SDW_PART_ID(addr);
810 	id->class_id = SDW_CLASS_ID(addr);
811 
812 	dev_dbg(bus->dev,
813 		"SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
814 		id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
815 }
816 EXPORT_SYMBOL(sdw_extract_slave_id);
817 
818 bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave)
819 {
820 	/*
821 	 * Dynamic scaling is defined by SDCA. However, some devices expose the class ID but
822 	 * can't support dynamic scaling. We might need a quirk to handle such devices.
823 	 */
824 	return slave->id.class_id;
825 }
826 EXPORT_SYMBOL(is_clock_scaling_supported_by_slave);
827 
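/*
 * Read device IDs reported on device 0 and assign a device number to each
 * Slave matching a firmware-described entry. Slaves with no firmware
 * description are still added so that they are visible in sysfs.
 * @programmed is set to true if at least one device number was written.
 */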
828 static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
829 {
830 	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
831 	struct sdw_slave *slave, *_s;
832 	struct sdw_slave_id id;
833 	struct sdw_msg msg;
834 	bool found;
835 	int count = 0, ret;
836 	u64 addr;
837 
838 	*programmed = false;
839 
840 	/* No Slave, so use raw xfer api */
841 	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
842 			   SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
843 	if (ret < 0)
844 		return ret;
845 
846 	do {
847 		ret = sdw_transfer(bus, &msg);
848 		if (ret == -ENODATA) { /* end of device id reads */
849 			dev_dbg(bus->dev, "No more devices to enumerate\n");
850 			ret = 0;
851 			break;
852 		}
853 		if (ret < 0) {
854 			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
855 			break;
856 		}
857 
858 		/*
859 		 * Construct the addr and extract. Cast the higher shift
860 		 * bits to avoid truncation due to size limit.
861 		 */
862 		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
863 			((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
864 			((u64)buf[0] << 40);
865 
866 		sdw_extract_slave_id(bus, addr, &id);
867 
868 		found = false;
869 		/* Now compare with entries */
870 		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
871 			if (sdw_compare_devid(slave, id) == 0) {
872 				found = true;
873 
874 				/*
875 				 * To prevent skipping state-machine stages, don't
876 				 * program a device until we've seen it report UNATTACHED.
877 				 * Must return here because no other device on #0
878 				 * can be detected until this one has been
879 				 * assigned a device ID.
880 				 */
881 				if (slave->status != SDW_SLAVE_UNATTACHED)
882 					return 0;
883 
884 				 * Assign a new dev_num to this Slave but do
885 				 * not mark it present. It will be marked
886 				 * present after it reports ATTACHED on the new
887 				 * dev_num.
888 				 * dev_num
889 				 */
890 				ret = sdw_assign_device_num(slave);
891 				if (ret < 0) {
892 					dev_err(bus->dev,
893 						"Assign dev_num failed:%d\n",
894 						ret);
895 					return ret;
896 				}
897 
898 				*programmed = true;
899 
900 				break;
901 			}
902 		}
903 
904 		if (!found) {
905 			/* TODO: Park this device in Group 13 */
906 
907 			/*
908 			 * add Slave device even if there is no platform
909 			 * firmware description. There will be no driver probe
910 			 * but the user/integration will be able to see the
911 			 * device, enumeration status and device number in sysfs
912 			 */
913 			sdw_slave_add(bus, &id, NULL);
914 
915 			dev_err(bus->dev, "Slave Entry not found\n");
916 		}
917 
918 		count++;
919 
920 		/*
921 		 * Loop until an error occurs or the retry count is exhausted.
922 		 * A device can drop off and rejoin during enumeration,
923 		 * so allow up to twice the device-number bound.
924 		 */
925 
926 	} while (ret == 0 && count < (SDW_MAX_DEVICES * 2));
927 
928 	return ret;
929 }
930 
931 static void sdw_modify_slave_status(struct sdw_slave *slave,
932 				    enum sdw_slave_status status)
933 {
934 	struct sdw_bus *bus = slave->bus;
935 
936 	mutex_lock(&bus->bus_lock);
937 
938 	dev_vdbg(bus->dev,
939 		 "changing status slave %d status %d new status %d\n",
940 		 slave->dev_num, slave->status, status);
941 
942 	if (status == SDW_SLAVE_UNATTACHED) {
943 		dev_dbg(&slave->dev,
944 			"initializing enumeration and init completion for Slave %d\n",
945 			slave->dev_num);
946 
947 		reinit_completion(&slave->enumeration_complete);
948 		reinit_completion(&slave->initialization_complete);
949 
950 	} else if ((status == SDW_SLAVE_ATTACHED) &&
951 		   (slave->status == SDW_SLAVE_UNATTACHED)) {
952 		dev_dbg(&slave->dev,
953 			"signaling enumeration completion for Slave %d\n",
954 			slave->dev_num);
955 
956 		complete_all(&slave->enumeration_complete);
957 	}
958 	slave->status = status;
959 	mutex_unlock(&bus->bus_lock);
960 }
961 
962 static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
963 				       enum sdw_clk_stop_mode mode,
964 				       enum sdw_clk_stop_type type)
965 {
966 	int ret = 0;
967 
968 	mutex_lock(&slave->sdw_dev_lock);
969 
970 	if (slave->probed)  {
971 		struct device *dev = &slave->dev;
972 		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
973 
974 		if (drv->ops && drv->ops->clk_stop)
975 			ret = drv->ops->clk_stop(slave, mode, type);
976 	}
977 
978 	mutex_unlock(&slave->sdw_dev_lock);
979 
980 	return ret;
981 }
982 
983 static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
984 				      enum sdw_clk_stop_mode mode,
985 				      bool prepare)
986 {
987 	bool wake_en;
988 	u32 val = 0;
989 	int ret;
990 
991 	wake_en = slave->prop.wake_capable;
992 
993 	if (prepare) {
994 		val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;
995 
996 		if (mode == SDW_CLK_STOP_MODE1)
997 			val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;
998 
999 		if (wake_en)
1000 			val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
1001 	} else {
1002 		ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
1003 		if (ret < 0) {
1004 			if (ret != -ENODATA)
1005 				dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
1006 			return ret;
1007 		}
1008 		val = ret;
1009 		val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
1010 	}
1011 
1012 	ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
1013 
1014 	if (ret < 0 && ret != -ENODATA)
1015 		dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);
1016 
1017 	return ret;
1018 }
1019 
1020 static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num, bool prepare)
1021 {
1022 	int retry = bus->clk_stop_timeout;
1023 	int val;
1024 
1025 	do {
1026 		val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
1027 		if (val < 0) {
1028 			if (val != -ENODATA)
1029 				dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
1030 			return val;
1031 		}
1032 		val &= SDW_SCP_STAT_CLK_STP_NF;
1033 		if (!val) {
1034 			dev_dbg(bus->dev, "clock stop %s done slave:%d\n",
1035 				prepare ? "prepare" : "deprepare",
1036 				dev_num);
1037 			return 0;
1038 		}
1039 
1040 		usleep_range(1000, 1500);
1041 		retry--;
1042 	} while (retry);
1043 
1044 	dev_dbg(bus->dev, "clock stop %s did not complete for slave:%d\n",
1045 		prepare ? "prepare" : "deprepare",
1046 		dev_num);
1047 
1048 	return -ETIMEDOUT;
1049 }
1050 
1051 /**
1052  * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
1053  *
1054  * @bus: SDW bus instance
1055  *
1056  * Query Slave for clock stop mode and prepare for that mode.
1057  */
1058 int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
1059 {
1060 	bool simple_clk_stop = true;
1061 	struct sdw_slave *slave;
1062 	bool is_slave = false;
1063 	int ret = 0;
1064 
1065 	/*
1066 	 * In order to save on transition time, prepare
1067 	 * each Slave and then wait for all Slave(s) to be
1068 	 * prepared for clock stop.
1069 	 * If one of the Slave devices has lost sync and
1070 	 * replies with Command Ignored/-ENODATA, we continue
1071 	 * the loop
1072 	 */
1073 	list_for_each_entry(slave, &bus->slaves, node) {
1074 		if (!slave->dev_num)
1075 			continue;
1076 
1077 		if (slave->status != SDW_SLAVE_ATTACHED &&
1078 		    slave->status != SDW_SLAVE_ALERT)
1079 			continue;
1080 
1081 		/* Identify if Slave(s) are available on Bus */
1082 		is_slave = true;
1083 
1084 		ret = sdw_slave_clk_stop_callback(slave,
1085 						  SDW_CLK_STOP_MODE0,
1086 						  SDW_CLK_PRE_PREPARE);
1087 		if (ret < 0 && ret != -ENODATA) {
1088 			dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
1089 			return ret;
1090 		}
1091 
1092 		/* Only prepare a Slave device if needed */
1093 		if (!slave->prop.simple_clk_stop_capable) {
1094 			simple_clk_stop = false;
1095 
1096 			ret = sdw_slave_clk_stop_prepare(slave,
1097 							 SDW_CLK_STOP_MODE0,
1098 							 true);
1099 			if (ret < 0 && ret != -ENODATA) {
1100 				dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
1101 				return ret;
1102 			}
1103 		}
1104 	}
1105 
1106 	/* Skip remaining clock stop preparation if no Slave is attached */
1107 	if (!is_slave)
1108 		return 0;
1109 
1110 	/*
1111 	 * Don't wait for all Slaves to be ready if they follow the simple
1112 	 * state machine
1113 	 */
1114 	if (!simple_clk_stop) {
1115 		ret = sdw_bus_wait_for_clk_prep_deprep(bus,
1116 						       SDW_BROADCAST_DEV_NUM, true);
1117 		/*
1118 		 * if there are no Slave devices present and the reply is
1119 		 * Command_Ignored/-ENODATA, we don't need to continue with the
1120 		 * flow and can just return here. The error code is not modified
1121 		 * and its handling left as an exercise for the caller.
1122 		 */
1123 		if (ret < 0)
1124 			return ret;
1125 	}
1126 
1127 	/* Inform slaves that prep is done */
1128 	list_for_each_entry(slave, &bus->slaves, node) {
1129 		if (!slave->dev_num)
1130 			continue;
1131 
1132 		if (slave->status != SDW_SLAVE_ATTACHED &&
1133 		    slave->status != SDW_SLAVE_ALERT)
1134 			continue;
1135 
1136 		ret = sdw_slave_clk_stop_callback(slave,
1137 						  SDW_CLK_STOP_MODE0,
1138 						  SDW_CLK_POST_PREPARE);
1139 
1140 		if (ret < 0 && ret != -ENODATA) {
1141 			dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
1142 			return ret;
1143 		}
1144 	}
1145 
1146 	return 0;
1147 }
1148 EXPORT_SYMBOL(sdw_bus_prep_clk_stop);
1149 
1150 /**
1151  * sdw_bus_clk_stop: stop bus clock
1152  *
1153  * @bus: SDW bus instance
1154  *
1155  * After preparing the Slaves for clock stop, stop the clock by broadcasting
1156  * write to SCP_CTRL register.
1157  */
1158 int sdw_bus_clk_stop(struct sdw_bus *bus)
1159 {
1160 	int ret;
1161 
1162 	/*
1163 	 * broadcast clock stop now, attached Slaves will ACK this,
1164 	 * unattached will ignore
1165 	 */
1166 	ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
1167 			       SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
1168 	if (ret < 0) {
1169 		if (ret != -ENODATA)
1170 			dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
1171 		return ret;
1172 	}
1173 
1174 	return 0;
1175 }
1176 EXPORT_SYMBOL(sdw_bus_clk_stop);
1177 
1178 /**
1179  * sdw_bus_exit_clk_stop: Exit clock stop mode
1180  *
1181  * @bus: SDW bus instance
1182  *
1183  * This de-prepares the Slaves by exiting Clock Stop Mode 0. Slaves
1184  * exiting Clock Stop Mode 1 will be de-prepared after they enumerate
1185  * back.
1186  */
1187 int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
1188 {
1189 	bool simple_clk_stop = true;
1190 	struct sdw_slave *slave;
1191 	bool is_slave = false;
1192 	int ret;
1193 
1194 	/*
1195 	 * In order to save on transition time, de-prepare
1196 	 * each Slave and then wait for all Slave(s) to be
1197 	 * de-prepared after clock resume.
1198 	 */
1199 	list_for_each_entry(slave, &bus->slaves, node) {
1200 		if (!slave->dev_num)
1201 			continue;
1202 
1203 		if (slave->status != SDW_SLAVE_ATTACHED &&
1204 		    slave->status != SDW_SLAVE_ALERT)
1205 			continue;
1206 
1207 		/* Identify if Slave(s) are available on Bus */
1208 		is_slave = true;
1209 
1210 		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1211 						  SDW_CLK_PRE_DEPREPARE);
1212 		if (ret < 0)
1213 			dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);
1214 
1215 		/* Only de-prepare a Slave device if needed */
1216 		if (!slave->prop.simple_clk_stop_capable) {
1217 			simple_clk_stop = false;
1218 
1219 			ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
1220 							 false);
1221 
1222 			if (ret < 0)
1223 				dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
1224 		}
1225 	}
1226 
1227 	/* Skip remaining clock stop de-preparation if no Slave is attached */
1228 	if (!is_slave)
1229 		return 0;
1230 
1231 	/*
1232 	 * Don't wait for all Slaves to be ready if they follow the simple
1233 	 * state machine
1234 	 */
1235 	if (!simple_clk_stop) {
1236 		ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM, false);
1237 		if (ret < 0)
1238 			dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
1239 	}
1240 
1241 	list_for_each_entry(slave, &bus->slaves, node) {
1242 		if (!slave->dev_num)
1243 			continue;
1244 
1245 		if (slave->status != SDW_SLAVE_ATTACHED &&
1246 		    slave->status != SDW_SLAVE_ALERT)
1247 			continue;
1248 
1249 		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1250 						  SDW_CLK_POST_DEPREPARE);
1251 		if (ret < 0)
1252 			dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
1253 	}
1254 
1255 	return 0;
1256 }
1257 EXPORT_SYMBOL(sdw_bus_exit_clk_stop);
1258 
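/*
 * Enable or disable the port-ready and implementation-defined interrupts for
 * data port @port of a Slave; TEST_FAIL is also unmasked when the bus uses a
 * test data mode.
 */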
1259 int sdw_configure_dpn_intr(struct sdw_slave *slave,
1260 			   int port, bool enable, int mask)
1261 {
1262 	u32 addr;
1263 	int ret;
1264 	u8 val = 0;
1265 
1266 	if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
1267 		dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
1268 			str_on_off(enable));
1269 		mask |= SDW_DPN_INT_TEST_FAIL;
1270 	}
1271 
1272 	addr = SDW_DPN_INTMASK(port);
1273 
1274 	/* Set/Clear port ready interrupt mask */
1275 	if (enable) {
1276 		val |= mask;
1277 		val |= SDW_DPN_INT_PORT_READY;
1278 	} else {
1279 		val &= ~(mask);
1280 		val &= ~SDW_DPN_INT_PORT_READY;
1281 	}
1282 
1283 	ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
1284 	if (ret < 0)
1285 		dev_err(&slave->dev,
1286 			"SDW_DPN_INTMASK write failed:%d\n", ret);
1287 
1288 	return ret;
1289 }
1290 
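/*
 * Compute the base-clock selector and clock-scale index (Tables 89 and 90 of
 * the SoundWire 1.2 specification) from the Master mclk frequency and the
 * current bus frequency; returns the scale index or a negative error code.
 */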
1291 int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base)
1292 {
1293 	u32 mclk_freq = slave->bus->prop.mclk_freq;
1294 	u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
1295 	unsigned int scale;
1296 	u8 scale_index;
1297 
1298 	if (!mclk_freq) {
1299 		dev_err(&slave->dev,
1300 			"no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
1301 		return -EINVAL;
1302 	}
1303 
1304 	/*
1305 	 * map base frequency using Table 89 of SoundWire 1.2 spec.
1306 	 * The order of the tests just follows the specification, this
1307 	 * is not a selection between possible values or a search for
1308 	 * the best value but just a mapping.  Only one case per platform
1309 	 * is relevant.
1310 	 * Some BIOS have inconsistent values for mclk_freq but a
1311 	 * correct root so we force the mclk_freq to avoid variations.
1312 	 */
1313 	if (!(19200000 % mclk_freq)) {
1314 		mclk_freq = 19200000;
1315 		*base = SDW_SCP_BASE_CLOCK_19200000_HZ;
1316 	} else if (!(22579200 % mclk_freq)) {
1317 		mclk_freq = 22579200;
1318 		*base = SDW_SCP_BASE_CLOCK_22579200_HZ;
1319 	} else if (!(24576000 % mclk_freq)) {
1320 		mclk_freq = 24576000;
1321 		*base = SDW_SCP_BASE_CLOCK_24576000_HZ;
1322 	} else if (!(32000000 % mclk_freq)) {
1323 		mclk_freq = 32000000;
1324 		*base = SDW_SCP_BASE_CLOCK_32000000_HZ;
1325 	} else if (!(96000000 % mclk_freq)) {
1326 		mclk_freq = 24000000;
1327 		*base = SDW_SCP_BASE_CLOCK_24000000_HZ;
1328 	} else {
1329 		dev_err(&slave->dev,
1330 			"Unsupported clock base, mclk %d\n",
1331 			mclk_freq);
1332 		return -EINVAL;
1333 	}
1334 
1335 	if (mclk_freq % curr_freq) {
1336 		dev_err(&slave->dev,
1337 			"mclk %d is not multiple of bus curr_freq %d\n",
1338 			mclk_freq, curr_freq);
1339 		return -EINVAL;
1340 	}
1341 
1342 	scale = mclk_freq / curr_freq;
1343 
1344 	/*
1345 	 * map scale to Table 90 of SoundWire 1.2 spec - and check
1346 	 * that the scale is a power of two and maximum 64
1347 	 */
1348 	scale_index = ilog2(scale);
1349 
1350 	if (BIT(scale_index) != scale || scale_index > 6) {
1351 		dev_err(&slave->dev,
1352 			"No match found for scale %d, bus mclk %d curr_freq %d\n",
1353 			scale, mclk_freq, curr_freq);
1354 		return -EINVAL;
1355 	}
1356 	scale_index++;
1357 
1358 	dev_dbg(&slave->dev,
1359 		"Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
1360 		*base, scale_index, mclk_freq, curr_freq);
1361 
1362 	return scale_index;
1363 }
1364 EXPORT_SYMBOL(sdw_slave_get_scale_index);
1365 
1366 static int sdw_slave_set_frequency(struct sdw_slave *slave)
1367 {
1368 	int scale_index;
1369 	u8 base;
1370 	int ret;
1371 
1372 	/*
1373 	 * frequency base and scale registers are required for SDCA
1374 	 * devices. They may also be used for 1.2+/non-SDCA devices.
1375 	 * Driver can set the property directly, for now there's no
1376 	 * DisCo property to discover support for the scaling registers
1377 	 * from platform firmware.
1378 	 */
1379 	if (!slave->id.class_id && !slave->prop.clock_reg_supported)
1380 		return 0;
1381 
1382 	scale_index = sdw_slave_get_scale_index(slave, &base);
1383 	if (scale_index < 0)
1384 		return scale_index;
1385 
1386 	ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
1387 	if (ret < 0) {
1388 		dev_err(&slave->dev,
1389 			"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
1390 		return ret;
1391 	}
1392 
1393 	/* initialize scale for both banks */
1394 	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
1395 	if (ret < 0) {
1396 		dev_err(&slave->dev,
1397 			"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
1398 		return ret;
1399 	}
1400 	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
1401 	if (ret < 0)
1402 		dev_err(&slave->dev,
1403 			"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
1404 
1405 	return ret;
1406 }
1407 
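/*
 * Program the clock scaling registers when needed, clear any stale bus-clash
 * or parity status flagged by Master quirks, then enable the SCP and DP0
 * interrupt masks for a newly attached Slave.
 */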
1408 static int sdw_initialize_slave(struct sdw_slave *slave)
1409 {
1410 	struct sdw_slave_prop *prop = &slave->prop;
1411 	int status;
1412 	int ret;
1413 	u8 val;
1414 
1415 	ret = sdw_slave_set_frequency(slave);
1416 	if (ret < 0)
1417 		return ret;
1418 
1419 	if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
1420 		/* Clear bus clash interrupt before enabling interrupt mask */
1421 		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1422 		if (status < 0) {
1423 			dev_err(&slave->dev,
1424 				"SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
1425 			return status;
1426 		}
1427 		if (status & SDW_SCP_INT1_BUS_CLASH) {
1428 			dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
1429 			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
1430 			if (ret < 0) {
1431 				dev_err(&slave->dev,
1432 					"SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
1433 				return ret;
1434 			}
1435 		}
1436 	}
1437 	if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
1438 	    !(prop->quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
1439 		/* Clear parity interrupt before enabling interrupt mask */
1440 		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1441 		if (status < 0) {
1442 			dev_err(&slave->dev,
1443 				"SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
1444 			return status;
1445 		}
1446 		if (status & SDW_SCP_INT1_PARITY) {
1447 			dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
1448 			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
1449 			if (ret < 0) {
1450 				dev_err(&slave->dev,
1451 					"SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
1452 				return ret;
1453 			}
1454 		}
1455 	}
1456 
1457 	/*
1458 	 * Set SCP_INT1_MASK register, typically bus clash and
1459 	 * implementation-defined interrupt mask. The Parity detection
1460 	 * may not always be correct on startup so its use is
1461 	 * device-dependent; it might e.g. only be enabled in
1462 	 * steady-state after a couple of frames.
1463 	 */
1464 	val = prop->scp_int1_mask;
1465 
1466 	/* Enable SCP interrupts */
1467 	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
1468 	if (ret < 0) {
1469 		dev_err(&slave->dev,
1470 			"SDW_SCP_INTMASK1 write failed:%d\n", ret);
1471 		return ret;
1472 	}
1473 
1474 	/* No need to continue if DP0 is not present */
1475 	if (!prop->dp0_prop)
1476 		return 0;
1477 
1478 	/* Enable DP0 interrupts */
1479 	val = prop->dp0_prop->imp_def_interrupts;
1480 	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
1481 
1482 	ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
1483 	if (ret < 0)
1484 		dev_err(&slave->dev,
1485 			"SDW_DP0_INTMASK write failed:%d\n", ret);
1486 	return ret;
1487 }
1488 
1489 static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
1490 {
1491 	u8 clear, impl_int_mask;
1492 	int status, status2, ret, count = 0;
1493 
1494 	status = sdw_read_no_pm(slave, SDW_DP0_INT);
1495 	if (status < 0) {
1496 		dev_err(&slave->dev,
1497 			"SDW_DP0_INT read failed:%d\n", status);
1498 		return status;
1499 	}
1500 
1501 	do {
1502 		clear = status & ~(SDW_DP0_INTERRUPTS | SDW_DP0_SDCA_CASCADE);
1503 
1504 		if (status & SDW_DP0_INT_TEST_FAIL) {
1505 			dev_err(&slave->dev, "Test fail for port 0\n");
1506 			clear |= SDW_DP0_INT_TEST_FAIL;
1507 		}
1508 
1509 		/*
1510 		 * Assumption: PORT_READY interrupt will be received only for
1511 		 * ports implementing Channel Prepare state machine (CP_SM)
1512 		 */
1513 
1514 		if (status & SDW_DP0_INT_PORT_READY) {
1515 			complete(&slave->port_ready[0]);
1516 			clear |= SDW_DP0_INT_PORT_READY;
1517 		}
1518 
1519 		if (status & SDW_DP0_INT_BRA_FAILURE) {
1520 			dev_err(&slave->dev, "BRA failed\n");
1521 			clear |= SDW_DP0_INT_BRA_FAILURE;
1522 		}
1523 
1524 		impl_int_mask = SDW_DP0_INT_IMPDEF1 |
1525 			SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;
1526 
1527 		if (status & impl_int_mask) {
1528 			clear |= impl_int_mask;
1529 			*slave_status = clear;
1530 		}
1531 
1532 		/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
1533 		ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
1534 		if (ret < 0) {
1535 			dev_err(&slave->dev,
1536 				"SDW_DP0_INT write failed:%d\n", ret);
1537 			return ret;
1538 		}
1539 
1540 		/* Read DP0 interrupt again */
1541 		status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
1542 		if (status2 < 0) {
1543 			dev_err(&slave->dev,
1544 				"SDW_DP0_INT read failed:%d\n", status2);
1545 			return status2;
1546 		}
1547 		/* filter to limit loop to interrupts identified in the first status read */
1548 		status &= status2;
1549 
1550 		count++;
1551 
1552 		/* we can get alerts while processing so keep retrying */
1553 	} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1554 
1555 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1556 		dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");
1557 
1558 	return ret;
1559 }
1560 
1561 static int sdw_handle_port_interrupt(struct sdw_slave *slave,
1562 				     int port, u8 *slave_status)
1563 {
1564 	u8 clear, impl_int_mask;
1565 	int status, status2, ret, count = 0;
1566 	u32 addr;
1567 
1568 	if (port == 0)
1569 		return sdw_handle_dp0_interrupt(slave, slave_status);
1570 
1571 	addr = SDW_DPN_INT(port);
1572 	status = sdw_read_no_pm(slave, addr);
1573 	if (status < 0) {
1574 		dev_err(&slave->dev,
1575 			"SDW_DPN_INT read failed:%d\n", status);
1576 
1577 		return status;
1578 	}
1579 
1580 	do {
1581 		clear = status & ~SDW_DPN_INTERRUPTS;
1582 
1583 		if (status & SDW_DPN_INT_TEST_FAIL) {
1584 			dev_err(&slave->dev, "Test fail for port:%d\n", port);
1585 			clear |= SDW_DPN_INT_TEST_FAIL;
1586 		}
1587 
1588 		/*
1589 		 * Assumption: PORT_READY interrupt will be received only
1590 		 * for ports implementing CP_SM.
1591 		 */
1592 		if (status & SDW_DPN_INT_PORT_READY) {
1593 			complete(&slave->port_ready[port]);
1594 			clear |= SDW_DPN_INT_PORT_READY;
1595 		}
1596 
1597 		impl_int_mask = SDW_DPN_INT_IMPDEF1 |
1598 			SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
1599 
1600 		if (status & impl_int_mask) {
1601 			clear |= impl_int_mask;
1602 			*slave_status = clear;
1603 		}
1604 
1605 		/* clear the interrupt but don't touch reserved fields */
1606 		ret = sdw_write_no_pm(slave, addr, clear);
1607 		if (ret < 0) {
1608 			dev_err(&slave->dev,
1609 				"SDW_DPN_INT write failed:%d\n", ret);
1610 			return ret;
1611 		}
1612 
1613 		/* Read DPN interrupt again */
1614 		status2 = sdw_read_no_pm(slave, addr);
1615 		if (status2 < 0) {
1616 			dev_err(&slave->dev,
1617 				"SDW_DPN_INT read failed:%d\n", status2);
1618 			return status2;
1619 		}
1620 		/* filter to limit loop to interrupts identified in the first status read */
1621 		status &= status2;
1622 
1623 		count++;
1624 
1625 		/* we can get alerts while processing so keep retrying */
1626 	} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1627 
1628 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1629 		dev_warn(&slave->dev, "Reached MAX_RETRY on port read\n");
1630 
1631 	return ret;
1632 }
1633 
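/*
 * Resume the Slave, then read and service SCP_INT1/INT2/INT3 (and the SDCA
 * cascade bit for SDCA devices) in a loop until no interrupt source remains
 * pending or the retry limit is reached.
 */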
1634 static int sdw_handle_slave_alerts(struct sdw_slave *slave)
1635 {
1636 	struct sdw_slave_intr_status slave_intr;
1637 	u8 clear = 0, bit, port_status[15] = {0};
1638 	int port_num, stat, ret, count = 0;
1639 	unsigned long port;
1640 	bool slave_notify;
1641 	u8 sdca_cascade = 0;
1642 	u8 buf, buf2[2];
1643 	bool parity_check;
1644 	bool parity_quirk;
1645 
1646 	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
1647 
1648 	ret = pm_runtime_get_sync(&slave->dev);
1649 	if (ret < 0 && ret != -EACCES) {
1650 		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
1651 		pm_runtime_put_noidle(&slave->dev);
1652 		return ret;
1653 	}
1654 
1655 	/* Read Intstat 1, Intstat 2 and Intstat 3 registers */
1656 	ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1657 	if (ret < 0) {
1658 		dev_err(&slave->dev,
1659 			"SDW_SCP_INT1 read failed:%d\n", ret);
1660 		goto io_err;
1661 	}
1662 	buf = ret;
1663 
1664 	ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1665 	if (ret < 0) {
1666 		dev_err(&slave->dev,
1667 			"SDW_SCP_INT2/3 read failed:%d\n", ret);
1668 		goto io_err;
1669 	}
1670 
1671 	if (slave->id.class_id) {
1672 		ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1673 		if (ret < 0) {
1674 			dev_err(&slave->dev,
1675 				"SDW_DP0_INT read failed:%d\n", ret);
1676 			goto io_err;
1677 		}
1678 		sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1679 	}
1680 
1681 	do {
1682 		slave_notify = false;
1683 
1684 		/*
1685 		 * Check parity, bus clash and Slave (impl defined)
1686 		 * interrupt
1687 		 */
1688 		if (buf & SDW_SCP_INT1_PARITY) {
1689 			parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
1690 			parity_quirk = !slave->first_interrupt_done &&
1691 				(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);
1692 
1693 			if (parity_check && !parity_quirk)
1694 				dev_err(&slave->dev, "Parity error detected\n");
1695 			clear |= SDW_SCP_INT1_PARITY;
1696 		}
1697 
1698 		if (buf & SDW_SCP_INT1_BUS_CLASH) {
1699 			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
1700 				dev_err(&slave->dev, "Bus clash detected\n");
1701 			clear |= SDW_SCP_INT1_BUS_CLASH;
1702 		}
1703 
1704 		/*
1705 		 * When bus clash or parity errors are detected, such errors
1706 		 * are unlikely to be recoverable errors.
1707 		 * TODO: In such scenario, reset bus. Make this configurable
1708 		 * via sysfs property with bus reset being the default.
1709 		 */
1710 
1711 		if (buf & SDW_SCP_INT1_IMPL_DEF) {
1712 			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
1713 				dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
1714 				slave_notify = true;
1715 			}
1716 			clear |= SDW_SCP_INT1_IMPL_DEF;
1717 		}
1718 
1719 		/* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
1720 		if (sdca_cascade)
1721 			slave_notify = true;
1722 
1723 		/* Check port 0 - 3 interrupts */
1724 		port = buf & SDW_SCP_INT1_PORT0_3;
1725 
1726 		/* To get port number corresponding to bits, shift it */
1727 		port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
1728 		for_each_set_bit(bit, &port, 8) {
1729 			sdw_handle_port_interrupt(slave, bit,
1730 						  &port_status[bit]);
1731 		}
1732 
1733 		/* Check if cascade 2 interrupt is present */
1734 		if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
1735 			port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
1736 			for_each_set_bit(bit, &port, 8) {
1737 				/* scp2 ports start from 4 */
1738 				port_num = bit + 4;
1739 				sdw_handle_port_interrupt(slave,
1740 						port_num,
1741 						&port_status[port_num]);
1742 			}
1743 		}
1744 
1745 		/* now check last cascade */
1746 		if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
1747 			port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
1748 			for_each_set_bit(bit, &port, 8) {
1749 				/* scp3 ports start from 11 */
1750 				port_num = bit + 11;
1751 				sdw_handle_port_interrupt(slave,
1752 						port_num,
1753 						&port_status[port_num]);
1754 			}
1755 		}
1756 
1757 		/* Update the Slave driver */
1758 		if (slave_notify) {
1759 			mutex_lock(&slave->sdw_dev_lock);
1760 
1761 			if (slave->probed) {
1762 				struct device *dev = &slave->dev;
1763 				struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1764 
1765 				if (slave->prop.use_domain_irq && slave->irq)
1766 					handle_nested_irq(slave->irq);
1767 
1768 				if (drv->ops && drv->ops->interrupt_callback) {
1769 					slave_intr.sdca_cascade = sdca_cascade;
1770 					slave_intr.control_port = clear;
1771 					memcpy(slave_intr.port, &port_status,
1772 					       sizeof(slave_intr.port));
1773 
1774 					drv->ops->interrupt_callback(slave, &slave_intr);
1775 				}
1776 			}
1777 
1778 			mutex_unlock(&slave->sdw_dev_lock);
1779 		}
1780 
1781 		/* Ack interrupt */
1782 		ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
1783 		if (ret < 0) {
1784 			dev_err(&slave->dev,
1785 				"SDW_SCP_INT1 write failed:%d\n", ret);
1786 			goto io_err;
1787 		}
1788 
1789 		/* at this point all initial interrupt sources were handled */
1790 		slave->first_interrupt_done = true;
1791 
1792 		/*
1793 		 * Read status again to ensure no new interrupts arrived
1794 		 * while servicing interrupts.
1795 		 */
1796 		ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1797 		if (ret < 0) {
1798 			dev_err(&slave->dev,
1799 				"SDW_SCP_INT1 recheck read failed:%d\n", ret);
1800 			goto io_err;
1801 		}
1802 		buf = ret;
1803 
1804 		ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1805 		if (ret < 0) {
1806 			dev_err(&slave->dev,
1807 				"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
1808 			goto io_err;
1809 		}
1810 
1811 		if (slave->id.class_id) {
1812 			ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1813 			if (ret < 0) {
1814 				dev_err(&slave->dev,
1815 					"SDW_DP0_INT recheck read failed:%d\n", ret);
1816 				goto io_err;
1817 			}
1818 			sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1819 		}
1820 
1821 		/*
1822 		 * Make sure no interrupts are pending
1823 		 */
1824 		stat = buf || buf2[0] || buf2[1] || sdca_cascade;
1825 
1826 		/*
1827 		 * Exit loop if Slave is continuously in ALERT state even
1828 		 * after servicing the interrupt multiple times.
1829 		 */
1830 		count++;
1831 
1832 		/* we can get alerts while processing so keep retrying */
1833 	} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
1834 
1835 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1836 		dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");
1837 
1838 io_err:
1839 	pm_runtime_mark_last_busy(&slave->dev);
1840 	pm_runtime_put_autosuspend(&slave->dev);
1841 
1842 	return ret;
1843 }
1844 
1845 static int sdw_update_slave_status(struct sdw_slave *slave,
1846 				   enum sdw_slave_status status)
1847 {
1848 	int ret = 0;
1849 
1850 	mutex_lock(&slave->sdw_dev_lock);
1851 
1852 	if (slave->probed) {
1853 		struct device *dev = &slave->dev;
1854 		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1855 
1856 		if (drv->ops && drv->ops->update_status)
1857 			ret = drv->ops->update_status(slave, status);
1858 	}
1859 
1860 	mutex_unlock(&slave->sdw_dev_lock);
1861 
1862 	return ret;
1863 }
1864 
1865 /**
1866  * sdw_handle_slave_status() - Handle Slave status
1867  * @bus: SDW bus instance
1868  * @status: Status for all Slave(s)
1869  */
sdw_handle_slave_status(struct sdw_bus * bus,enum sdw_slave_status status[])1870 int sdw_handle_slave_status(struct sdw_bus *bus,
1871 			    enum sdw_slave_status status[])
1872 {
1873 	enum sdw_slave_status prev_status;
1874 	struct sdw_slave *slave;
1875 	bool attached_initializing, id_programmed;
1876 	int i, ret = 0;
1877 
1878 	/* first check if any Slaves fell off the bus */
1879 	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1880 		mutex_lock(&bus->bus_lock);
1881 		if (test_bit(i, bus->assigned) == false) {
1882 			mutex_unlock(&bus->bus_lock);
1883 			continue;
1884 		}
1885 		mutex_unlock(&bus->bus_lock);
1886 
1887 		slave = sdw_get_slave(bus, i);
1888 		if (!slave)
1889 			continue;
1890 
1891 		if (status[i] == SDW_SLAVE_UNATTACHED &&
1892 		    slave->status != SDW_SLAVE_UNATTACHED) {
1893 			dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
1894 				 i, slave->status);
1895 			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1896 
			/* Ensure the driver knows that the peripheral is unattached */
			ret = sdw_update_slave_status(slave, status[i]);
			if (ret < 0)
				dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
		}
	}

	if (status[0] == SDW_SLAVE_ATTACHED) {
		dev_dbg(bus->dev, "Slave attached, programming device number\n");

		/*
		 * Programming a device number will have side effects,
		 * so we deal with other devices at a later time.
		 * This relies on those devices reporting ATTACHED, which will
		 * trigger another call to this function. This will only
		 * happen if at least one device ID was programmed.
		 * Error returns from sdw_program_device_num() are currently
		 * ignored because there's no useful recovery that can be done.
		 * Returning the error here could result in the current status
		 * of other devices not being handled, because if no device IDs
		 * were programmed there's nothing to guarantee a status change
		 * to trigger another call to this function.
		 */
		sdw_program_device_num(bus, &id_programmed);
		if (id_programmed)
			return 0;
	}

	/* Continue to check other slave statuses */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		attached_initializing = false;

		switch (status[i]) {
		case SDW_SLAVE_UNATTACHED:
			if (slave->status == SDW_SLAVE_UNATTACHED)
				break;

			dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
				 i, slave->status);

			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			break;

		case SDW_SLAVE_ALERT:
			ret = sdw_handle_slave_alerts(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d alert handling failed: %d\n",
					i, ret);
			break;

		case SDW_SLAVE_ATTACHED:
			if (slave->status == SDW_SLAVE_ATTACHED)
				break;

			prev_status = slave->status;
			sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);

			if (prev_status == SDW_SLAVE_ALERT)
				break;

			attached_initializing = true;

			ret = sdw_initialize_slave(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d initialization failed: %d\n",
					i, ret);

			break;

		default:
			dev_err(&slave->dev, "Invalid slave %d status:%d\n",
				i, status[i]);
			break;
		}

		ret = sdw_update_slave_status(slave, status[i]);
		if (ret < 0)
			dev_err(&slave->dev,
				"Update Slave status failed:%d\n", ret);
		if (attached_initializing) {
			dev_dbg(&slave->dev,
				"signaling initialization completion for Slave %d\n",
				slave->dev_num);

			complete_all(&slave->initialization_complete);

			/*
			 * If the manager became pm_runtime active, the peripherals will be
			 * restarted and attach, but their pm_runtime status may remain
			 * suspended. If the 'update_slave_status' callback initiates
			 * any sort of deferred processing, this processing would not be
			 * cancelled on pm_runtime suspend.
			 * To avoid such zombie states, we queue a request to resume.
			 * This would be a no-op in case the peripheral was being resumed
			 * by e.g. the ALSA/ASoC framework.
			 */
			pm_request_resume(&slave->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL(sdw_handle_slave_status);
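
/*
 * Illustrative sketch (not part of this driver): a controller driver is
 * expected to decode its PING-frame or status registers into a per-device
 * array and hand it to sdw_handle_slave_status(). The helper names below are
 * hypothetical; only sdw_handle_slave_status() and the indexing by device
 * number (0 to SDW_MAX_DEVICES) come from the code above.
 *
 *	static void my_master_process_ping(struct sdw_bus *bus, u64 ping)
 *	{
 *		enum sdw_slave_status status[SDW_MAX_DEVICES + 1] = {};
 *		int i;
 *
 *		for (i = 0; i <= SDW_MAX_DEVICES; i++)
 *			status[i] = my_master_decode_status(ping, i);
 *
 *		sdw_handle_slave_status(bus, status);
 *	}
 */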
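/**
 * sdw_clear_slave_status() - mark all enumerated Slaves as UNATTACHED
 * @bus: SDW bus instance
 * @request: reason for the forced unattach, stored in each Slave's
 *           unattach_request and consulted on pm_runtime resume
 */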
void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
{
	struct sdw_slave *slave;
	int i;

	/* Check all non-zero devices */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (slave->status != SDW_SLAVE_UNATTACHED) {
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			slave->first_interrupt_done = false;
			sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
		}

		/* keep track of request, used in pm_runtime resume */
		slave->unattach_request = request;
	}
}
EXPORT_SYMBOL(sdw_clear_slave_status);
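/**
 * sdw_bpt_send_async() - start a BPT message transfer
 * @bus: SDW bus instance
 * @slave: target SDW Slave (must be enumerated)
 * @msg: BPT message to transfer
 *
 * Validates the message length and the Slave device number, then hands the
 * transfer to the bus ops. Completion is checked with sdw_bpt_wait().
 */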
int sdw_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
{
	if (msg->len > SDW_BPT_MSG_MAX_BYTES) {
		dev_err(bus->dev, "Invalid BPT message length %d\n", msg->len);
		return -EINVAL;
	}

	/* check device is enumerated */
	if (slave->dev_num == SDW_ENUM_DEV_NUM ||
	    slave->dev_num > SDW_MAX_DEVICES) {
		dev_err(&slave->dev, "Invalid device number %d\n", slave->dev_num);
		return -ENODEV;
	}

	/* make sure all callbacks are defined */
	if (!bus->ops->bpt_send_async ||
	    !bus->ops->bpt_wait) {
		dev_err(bus->dev, "BPT callbacks not defined\n");
		return -EOPNOTSUPP;
	}

	return bus->ops->bpt_send_async(bus, slave, msg);
}
EXPORT_SYMBOL(sdw_bpt_send_async);
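/**
 * sdw_bpt_wait() - wait for completion of a BPT message transfer
 * @bus: SDW bus instance
 * @slave: target SDW Slave
 * @msg: BPT message previously passed to sdw_bpt_send_async()
 */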
int sdw_bpt_wait(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
{
	return bus->ops->bpt_wait(bus, slave, msg);
}
EXPORT_SYMBOL(sdw_bpt_wait);
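/**
 * sdw_bpt_send_sync() - synchronous BPT message transfer
 * @bus: SDW bus instance
 * @slave: target SDW Slave
 * @msg: BPT message to transfer
 *
 * Convenience wrapper that starts the transfer with sdw_bpt_send_async()
 * and then waits for its completion with sdw_bpt_wait().
 */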
int sdw_bpt_send_sync(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
{
	int ret;

	ret = sdw_bpt_send_async(bus, slave, msg);
	if (ret < 0)
		return ret;

	return sdw_bpt_wait(bus, slave, msg);
}
EXPORT_SYMBOL(sdw_bpt_send_sync);
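
/*
 * Illustrative sketch (not part of this driver): callers that can overlap
 * other work with the transfer may use the asynchronous pair directly
 * instead of sdw_bpt_send_sync(). Message setup is omitted because it
 * depends on the sdw_bpt_msg definition; only the len field is referenced
 * by the checks above, and my_prepare_next_chunk() is hypothetical.
 *
 *	ret = sdw_bpt_send_async(bus, slave, msg);
 *	if (ret < 0)
 *		return ret;
 *
 *	my_prepare_next_chunk();
 *
 *	ret = sdw_bpt_wait(bus, slave, msg);
 */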