Lines Matching +full:0 +full:xd (drivers/thunderbolt/xdomain.c, Thunderbolt XDomain protocol support)
45 UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
46 0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
85 req->result.err = 0; in tb_xdomain_copy()
114 * @xd: XDomain to send the message
122 * Return: %0 in case of success and negative errno in case of failure
124 int tb_xdomain_response(struct tb_xdomain *xd, const void *response, in tb_xdomain_response() argument
127 return __tb_xdomain_response(xd->tb->ctl, response, size, type); in tb_xdomain_response()
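For context, a service driver answers a peer's XDomain protocol message through tb_xdomain_response(). A minimal sketch follows; the my_svc_reply wire format is hypothetical (each service protocol defines its own packet layout), and the packet-type constant mirrors what existing service drivers pass:

#include <linux/thunderbolt.h>

/* Hypothetical reply packet; a real protocol defines its own layout. */
struct my_svc_reply {
	__le32 command_id;
	__le32 status;
};

static int my_svc_send_reply(struct tb_xdomain *xd, u32 command_id, u32 status)
{
	struct my_svc_reply reply = {
		.command_id = cpu_to_le32(command_id),
		.status = cpu_to_le32(status),
	};

	/* Queues the packet to the remote domain; 0 or negative errno. */
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}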
161 * @xd: XDomain to send the request
174 * Return: %0 in case of success and negative errno in case of failure
176 int tb_xdomain_request(struct tb_xdomain *xd, const void *request, in tb_xdomain_request() argument
181 return __tb_xdomain_request(xd->tb->ctl, request, request_size, in tb_xdomain_request()
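The request variant sends a message and waits for the matching response or a timeout. A sketch along the same lines, reusing the hypothetical my_svc_reply from the previous example; the packet-type values and the 1000 ms timeout are illustrative, as both are protocol-specific:

static int my_svc_login(struct tb_xdomain *xd, struct my_svc_reply *reply)
{
	/* Hypothetical request packet for the same made-up protocol. */
	struct my_svc_request {
		__le32 command_id;
	} request = {
		.command_id = cpu_to_le32(1),
	};

	/* Blocks until the peer answers or the timeout expires. */
	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP,
				  reply, sizeof(*reply),
				  TB_CFG_PKG_XDOMAIN_RESP, 1000);
}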
207 return 0; in tb_xdp_handle_error()
223 return 0; in tb_xdp_handle_error()
233 memset(&req, 0, sizeof(req)); in tb_xdp_uuid_request()
237 memset(&res, 0, sizeof(res)); in tb_xdp_uuid_request()
250 return 0; in tb_xdp_uuid_request()
258 memset(&res, 0, sizeof(res)); in tb_xdp_uuid_response()
275 memset(&res, 0, sizeof(res)); in tb_xdp_error_response()
300 memset(&req, 0, sizeof(req)); in tb_xdp_properties_request()
306 len = 0; in tb_xdp_properties_request()
307 data_len = 0; in tb_xdp_properties_request()
393 return 0; in tb_xdp_properties_response()
438 memset(&req, 0, sizeof(req)); in tb_xdp_properties_changed_request()
443 memset(&res, 0, sizeof(res)); in tb_xdp_properties_changed_request()
459 memset(&res, 0, sizeof(res)); in tb_xdp_properties_changed_response()
486 return 0; in tb_register_protocol_handler()
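tb_register_protocol_handler() lets a driver claim incoming XDomain packets that carry a given protocol UUID. A sketch of the registration pattern; the UUID value and the callback body are placeholders:

static int my_proto_callback(const void *buf, size_t size, void *data)
{
	/* Inspect the raw XDomain packet; returning non-zero marks it handled. */
	return 1;
}

/* Placeholder protocol UUID; a real service uses its own well-known UUID. */
static const uuid_t my_proto_uuid =
	UUID_INIT(0x00000000, 0x0000, 0x0000,
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);

static struct tb_protocol_handler my_proto_handler = {
	.uuid = &my_proto_uuid,
	.callback = my_proto_callback,
};

/* Pairs with tb_unregister_protocol_handler() on teardown. */
static int my_proto_register(void)
{
	return tb_register_protocol_handler(&my_proto_handler);
}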
509 ret = tb_property_format_dir(xdomain_property_dir, NULL, 0); in rebuild_property_block()
510 if (ret < 0) in rebuild_property_block()
530 return 0; in rebuild_property_block()
561 int ret = 0; in tb_xdp_handle_request()
592 struct tb_xdomain *xd; in tb_xdp_handle_request() local
601 xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid); in tb_xdp_handle_request()
602 if (xd) { in tb_xdp_handle_request()
603 queue_delayed_work(tb->wq, &xd->get_properties_work, in tb_xdp_handle_request()
605 tb_xdomain_put(xd); in tb_xdp_handle_request()
743 return sprintf(buf, "0x%08x\n", svc->prtcstns); in prtcstns_show()
778 struct tb_xdomain *xd = tb_service_parent(svc); in tb_service_release() local
780 ida_simple_remove(&xd->service_ids, svc->id); in tb_service_release()
795 struct tb_xdomain *xd = data; in remove_missing_service() local
800 return 0; in remove_missing_service()
802 if (!tb_property_find(xd->properties, svc->key, in remove_missing_service()
806 return 0; in remove_missing_service()
816 return 0; in find_service()
845 return 0; in populate_service()
848 static void enumerate_services(struct tb_xdomain *xd) in enumerate_services() argument
859 device_for_each_child_reverse(&xd->dev, xd, remove_missing_service); in enumerate_services()
862 tb_property_for_each(xd->properties, p) { in enumerate_services()
867 dev = device_find_child(&xd->dev, p, find_service); in enumerate_services()
882 id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); in enumerate_services()
883 if (id < 0) { in enumerate_services()
891 svc->dev.parent = &xd->dev; in enumerate_services()
892 dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id); in enumerate_services()
901 static int populate_properties(struct tb_xdomain *xd, in populate_properties() argument
910 xd->device = p->value.immediate; in populate_properties()
915 xd->vendor = p->value.immediate; in populate_properties()
917 kfree(xd->device_name); in populate_properties()
918 xd->device_name = NULL; in populate_properties()
919 kfree(xd->vendor_name); in populate_properties()
920 xd->vendor_name = NULL; in populate_properties()
925 xd->device_name = kstrdup(p->value.text, GFP_KERNEL); in populate_properties()
928 xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL); in populate_properties()
930 return 0; in populate_properties()
933 /* Called with @xd->lock held */
934 static void tb_xdomain_restore_paths(struct tb_xdomain *xd) in tb_xdomain_restore_paths() argument
936 if (!xd->resume) in tb_xdomain_restore_paths()
939 xd->resume = false; in tb_xdomain_restore_paths()
940 if (xd->transmit_path) { in tb_xdomain_restore_paths()
941 dev_dbg(&xd->dev, "re-establishing DMA path\n"); in tb_xdomain_restore_paths()
942 tb_domain_approve_xdomain_paths(xd->tb, xd); in tb_xdomain_restore_paths()
948 struct tb_xdomain *xd = container_of(work, typeof(*xd), in tb_xdomain_get_uuid() local
950 struct tb *tb = xd->tb; in tb_xdomain_get_uuid()
954 ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid); in tb_xdomain_get_uuid()
955 if (ret < 0) { in tb_xdomain_get_uuid()
956 if (xd->uuid_retries-- > 0) { in tb_xdomain_get_uuid()
957 queue_delayed_work(xd->tb->wq, &xd->get_uuid_work, in tb_xdomain_get_uuid()
960 dev_dbg(&xd->dev, "failed to read remote UUID\n"); in tb_xdomain_get_uuid()
965 if (uuid_equal(&uuid, xd->local_uuid)) { in tb_xdomain_get_uuid()
966 dev_dbg(&xd->dev, "intra-domain loop detected\n"); in tb_xdomain_get_uuid()
975 if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) { in tb_xdomain_get_uuid()
976 dev_dbg(&xd->dev, "remote UUID is different, unplugging\n"); in tb_xdomain_get_uuid()
977 xd->is_unplugged = true; in tb_xdomain_get_uuid()
982 if (!xd->remote_uuid) { in tb_xdomain_get_uuid()
983 xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL); in tb_xdomain_get_uuid()
984 if (!xd->remote_uuid) in tb_xdomain_get_uuid()
989 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, in tb_xdomain_get_uuid()
991 queue_delayed_work(xd->tb->wq, &xd->get_properties_work, in tb_xdomain_get_uuid()
997 struct tb_xdomain *xd = container_of(work, typeof(*xd), in tb_xdomain_get_properties() local
1000 struct tb *tb = xd->tb; in tb_xdomain_get_properties()
1003 u32 gen = 0; in tb_xdomain_get_properties()
1006 ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid, in tb_xdomain_get_properties()
1007 xd->remote_uuid, xd->properties_retries, in tb_xdomain_get_properties()
1009 if (ret < 0) { in tb_xdomain_get_properties()
1010 if (xd->properties_retries-- > 0) { in tb_xdomain_get_properties()
1011 queue_delayed_work(xd->tb->wq, &xd->get_properties_work, in tb_xdomain_get_properties()
1015 dev_err(&xd->dev, in tb_xdomain_get_properties()
1017 xd->remote_uuid); in tb_xdomain_get_properties()
1022 xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES; in tb_xdomain_get_properties()
1024 mutex_lock(&xd->lock); in tb_xdomain_get_properties()
1027 if (xd->properties && gen <= xd->property_block_gen) { in tb_xdomain_get_properties()
1034 tb_xdomain_restore_paths(xd); in tb_xdomain_get_properties()
1040 dev_err(&xd->dev, "failed to parse XDomain properties\n"); in tb_xdomain_get_properties()
1044 ret = populate_properties(xd, dir); in tb_xdomain_get_properties()
1046 dev_err(&xd->dev, "missing XDomain properties in response\n"); in tb_xdomain_get_properties()
1051 if (xd->properties) { in tb_xdomain_get_properties()
1052 tb_property_free_dir(xd->properties); in tb_xdomain_get_properties()
1056 xd->properties = dir; in tb_xdomain_get_properties()
1057 xd->property_block_gen = gen; in tb_xdomain_get_properties()
1059 tb_xdomain_restore_paths(xd); in tb_xdomain_get_properties()
1061 mutex_unlock(&xd->lock); in tb_xdomain_get_properties()
1071 if (device_add(&xd->dev)) { in tb_xdomain_get_properties()
1072 dev_err(&xd->dev, "failed to add XDomain device\n"); in tb_xdomain_get_properties()
1076 kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE); in tb_xdomain_get_properties()
1079 enumerate_services(xd); in tb_xdomain_get_properties()
1086 mutex_unlock(&xd->lock); in tb_xdomain_get_properties()
1091 struct tb_xdomain *xd = container_of(work, typeof(*xd), in tb_xdomain_properties_changed() local
1095 ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route, in tb_xdomain_properties_changed()
1096 xd->properties_changed_retries, xd->local_uuid); in tb_xdomain_properties_changed()
1098 if (xd->properties_changed_retries-- > 0) in tb_xdomain_properties_changed()
1099 queue_delayed_work(xd->tb->wq, in tb_xdomain_properties_changed()
1100 &xd->properties_changed_work, in tb_xdomain_properties_changed()
1105 xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES; in tb_xdomain_properties_changed()
1111 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in device_show() local
1113 return sprintf(buf, "%#x\n", xd->device); in device_show()
1120 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in device_name_show() local
1123 if (mutex_lock_interruptible(&xd->lock)) in device_name_show()
1125 ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : ""); in device_name_show()
1126 mutex_unlock(&xd->lock); in device_name_show()
1135 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in vendor_show() local
1137 return sprintf(buf, "%#x\n", xd->vendor); in vendor_show()
1144 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in vendor_name_show() local
1147 if (mutex_lock_interruptible(&xd->lock)) in vendor_name_show()
1149 ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : ""); in vendor_name_show()
1150 mutex_unlock(&xd->lock); in vendor_name_show()
1159 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in unique_id_show() local
1161 return sprintf(buf, "%pUb\n", xd->remote_uuid); in unique_id_show()
1185 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in tb_xdomain_release() local
1187 put_device(xd->dev.parent); in tb_xdomain_release()
1189 tb_property_free_dir(xd->properties); in tb_xdomain_release()
1190 ida_destroy(&xd->service_ids); in tb_xdomain_release()
1192 kfree(xd->local_uuid); in tb_xdomain_release()
1193 kfree(xd->remote_uuid); in tb_xdomain_release()
1194 kfree(xd->device_name); in tb_xdomain_release()
1195 kfree(xd->vendor_name); in tb_xdomain_release()
1196 kfree(xd); in tb_xdomain_release()
1199 static void start_handshake(struct tb_xdomain *xd) in start_handshake() argument
1201 xd->uuid_retries = XDOMAIN_UUID_RETRIES; in start_handshake()
1202 xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES; in start_handshake()
1203 xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES; in start_handshake()
1205 if (xd->needs_uuid) { in start_handshake()
1206 queue_delayed_work(xd->tb->wq, &xd->get_uuid_work, in start_handshake()
1210 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, in start_handshake()
1212 queue_delayed_work(xd->tb->wq, &xd->get_properties_work, in start_handshake()
1217 static void stop_handshake(struct tb_xdomain *xd) in stop_handshake() argument
1219 xd->uuid_retries = 0; in stop_handshake()
1220 xd->properties_retries = 0; in stop_handshake()
1221 xd->properties_changed_retries = 0; in stop_handshake()
1223 cancel_delayed_work_sync(&xd->get_uuid_work); in stop_handshake()
1224 cancel_delayed_work_sync(&xd->get_properties_work); in stop_handshake()
1225 cancel_delayed_work_sync(&xd->properties_changed_work); in stop_handshake()
1231 return 0; in tb_xdomain_suspend()
1236 struct tb_xdomain *xd = tb_to_xdomain(dev); in tb_xdomain_resume() local
1242 xd->resume = true; in tb_xdomain_resume()
1243 start_handshake(xd); in tb_xdomain_resume()
1245 return 0; in tb_xdomain_resume()
1276 struct tb_xdomain *xd; in tb_xdomain_alloc() local
1283 xd = kzalloc(sizeof(*xd), GFP_KERNEL); in tb_xdomain_alloc()
1284 if (!xd) in tb_xdomain_alloc()
1287 xd->tb = tb; in tb_xdomain_alloc()
1288 xd->route = route; in tb_xdomain_alloc()
1289 ida_init(&xd->service_ids); in tb_xdomain_alloc()
1290 mutex_init(&xd->lock); in tb_xdomain_alloc()
1291 INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid); in tb_xdomain_alloc()
1292 INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties); in tb_xdomain_alloc()
1293 INIT_DELAYED_WORK(&xd->properties_changed_work, in tb_xdomain_alloc()
1296 xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL); in tb_xdomain_alloc()
1297 if (!xd->local_uuid) in tb_xdomain_alloc()
1301 xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), in tb_xdomain_alloc()
1303 if (!xd->remote_uuid) in tb_xdomain_alloc()
1306 xd->needs_uuid = true; in tb_xdomain_alloc()
1309 device_initialize(&xd->dev); in tb_xdomain_alloc()
1310 xd->dev.parent = get_device(parent); in tb_xdomain_alloc()
1311 xd->dev.bus = &tb_bus_type; in tb_xdomain_alloc()
1312 xd->dev.type = &tb_xdomain_type; in tb_xdomain_alloc()
1313 xd->dev.groups = xdomain_attr_groups; in tb_xdomain_alloc()
1314 dev_set_name(&xd->dev, "%u-%llx", tb->index, route); in tb_xdomain_alloc()
1320 pm_runtime_set_active(&xd->dev); in tb_xdomain_alloc()
1321 pm_runtime_get_noresume(&xd->dev); in tb_xdomain_alloc()
1322 pm_runtime_enable(&xd->dev); in tb_xdomain_alloc()
1324 return xd; in tb_xdomain_alloc()
1327 kfree(xd->local_uuid); in tb_xdomain_alloc()
1329 kfree(xd); in tb_xdomain_alloc()
1336 * @xd: XDomain to add
1343 void tb_xdomain_add(struct tb_xdomain *xd) in tb_xdomain_add() argument
1346 start_handshake(xd); in tb_xdomain_add()
1352 return 0; in unregister_service()
1357 * @xd: XDomain to remove
1360 * along with any services from the bus. When the last reference to @xd
1363 void tb_xdomain_remove(struct tb_xdomain *xd) in tb_xdomain_remove() argument
1365 stop_handshake(xd); in tb_xdomain_remove()
1367 device_for_each_child_reverse(&xd->dev, xd, unregister_service); in tb_xdomain_remove()
1374 pm_runtime_disable(&xd->dev); in tb_xdomain_remove()
1375 pm_runtime_put_noidle(&xd->dev); in tb_xdomain_remove()
1376 pm_runtime_set_suspended(&xd->dev); in tb_xdomain_remove()
1378 if (!device_is_registered(&xd->dev)) in tb_xdomain_remove()
1379 put_device(&xd->dev); in tb_xdomain_remove()
1381 device_unregister(&xd->dev); in tb_xdomain_remove()
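tb_xdomain_add() and tb_xdomain_remove() are called by the connection manager inside the thunderbolt core rather than by service drivers. Roughly, when a host-to-host link is discovered the flow looks like the sketch below (parameter order per tb_xdomain_alloc() in this file; the helper name is illustrative); on unplug the core calls tb_xdomain_remove(), which unregisters any services and drops the final reference:

static void my_cm_add_xdomain(struct tb_switch *sw, u64 route,
			      const uuid_t *local_uuid,
			      const uuid_t *remote_uuid)
{
	struct tb_xdomain *xd;

	/* Allocates the XDomain device and references the parent switch. */
	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
	if (!xd)
		return;

	/* Registers it on the bus and starts the UUID/property handshake. */
	tb_xdomain_add(xd);
}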
1386 * @xd: XDomain connection
1398 * Return: %0 in case of success and negative errno in case of error
1400 int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path, in tb_xdomain_enable_paths() argument
1406 mutex_lock(&xd->lock); in tb_xdomain_enable_paths()
1408 if (xd->transmit_path) { in tb_xdomain_enable_paths()
1409 ret = xd->transmit_path == transmit_path ? 0 : -EBUSY; in tb_xdomain_enable_paths()
1413 xd->transmit_path = transmit_path; in tb_xdomain_enable_paths()
1414 xd->transmit_ring = transmit_ring; in tb_xdomain_enable_paths()
1415 xd->receive_path = receive_path; in tb_xdomain_enable_paths()
1416 xd->receive_ring = receive_ring; in tb_xdomain_enable_paths()
1418 ret = tb_domain_approve_xdomain_paths(xd->tb, xd); in tb_xdomain_enable_paths()
1421 mutex_unlock(&xd->lock); in tb_xdomain_enable_paths()
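A service driver enables the DMA lanes after negotiating path and ring numbers with the remote side. A sketch, where the path defines and the ring hop arguments are placeholders supplied by the driver's own ring setup:

/* Placeholder hop IDs; real drivers use the path numbers agreed with the
 * remote host and the hops of their allocated NHI rings. */
#define MY_TRANSMIT_PATH	0xf
#define MY_RECEIVE_PATH		0xf

static int my_svc_enable_dma(struct tb_xdomain *xd, u16 tx_ring_hop,
			     u16 rx_ring_hop)
{
	/* Asks the connection manager to set up both DMA paths. */
	return tb_xdomain_enable_paths(xd, MY_TRANSMIT_PATH, tx_ring_hop,
				       MY_RECEIVE_PATH, rx_ring_hop);
}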
1429 * @xd: XDomain connection
1434 * Return: %0 in case of success and negative errno in case of error
1436 int tb_xdomain_disable_paths(struct tb_xdomain *xd) in tb_xdomain_disable_paths() argument
1438 int ret = 0; in tb_xdomain_disable_paths()
1440 mutex_lock(&xd->lock); in tb_xdomain_disable_paths()
1441 if (xd->transmit_path) { in tb_xdomain_disable_paths()
1442 xd->transmit_path = 0; in tb_xdomain_disable_paths()
1443 xd->transmit_ring = 0; in tb_xdomain_disable_paths()
1444 xd->receive_path = 0; in tb_xdomain_disable_paths()
1445 xd->receive_ring = 0; in tb_xdomain_disable_paths()
1447 ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd); in tb_xdomain_disable_paths()
1449 mutex_unlock(&xd->lock); in tb_xdomain_disable_paths()
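The paths are torn down again when the service stops or the remote host logs out; a minimal sketch:

static void my_svc_disable_dma(struct tb_xdomain *xd)
{
	int ret;

	/* Asks the connection manager to disconnect the DMA paths. */
	ret = tb_xdomain_disable_paths(xd);
	if (ret)
		dev_warn(&xd->dev, "failed to disable DMA paths: %d\n", ret);
}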
1468 struct tb_xdomain *xd; in switch_find_xdomain() local
1471 xd = port->xdomain; in switch_find_xdomain()
1474 if (xd->remote_uuid && in switch_find_xdomain()
1475 uuid_equal(xd->remote_uuid, lookup->uuid)) in switch_find_xdomain()
1476 return xd; in switch_find_xdomain()
1478 lookup->link == xd->link && in switch_find_xdomain()
1479 lookup->depth == xd->depth) { in switch_find_xdomain()
1480 return xd; in switch_find_xdomain()
1482 lookup->route == xd->route) { in switch_find_xdomain()
1483 return xd; in switch_find_xdomain()
1486 xd = switch_find_xdomain(port->remote->sw, lookup); in switch_find_xdomain()
1487 if (xd) in switch_find_xdomain()
1488 return xd; in switch_find_xdomain()
1513 struct tb_xdomain *xd; in tb_xdomain_find_by_uuid() local
1515 memset(&lookup, 0, sizeof(lookup)); in tb_xdomain_find_by_uuid()
1518 xd = switch_find_xdomain(tb->root_switch, &lookup); in tb_xdomain_find_by_uuid()
1519 return tb_xdomain_get(xd); in tb_xdomain_find_by_uuid()
1543 struct tb_xdomain *xd; in tb_xdomain_find_by_link_depth() local
1545 memset(&lookup, 0, sizeof(lookup)); in tb_xdomain_find_by_link_depth()
1549 xd = switch_find_xdomain(tb->root_switch, &lookup); in tb_xdomain_find_by_link_depth()
1550 return tb_xdomain_get(xd); in tb_xdomain_find_by_link_depth()
1571 struct tb_xdomain *xd; in tb_xdomain_find_by_route() local
1573 memset(&lookup, 0, sizeof(lookup)); in tb_xdomain_find_by_route()
1576 xd = switch_find_xdomain(tb->root_switch, &lookup); in tb_xdomain_find_by_route()
1577 return tb_xdomain_get(xd); in tb_xdomain_find_by_route()
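The find helpers return the XDomain with a reference held (or NULL), so every successful lookup must be balanced with tb_xdomain_put(). A sketch using the UUID variant; the _locked form takes the domain lock internally, as seen earlier in tb_xdp_handle_request():

static int my_ping_remote(struct tb *tb, const uuid_t *remote_uuid)
{
	struct tb_xdomain *xd;

	xd = tb_xdomain_find_by_uuid_locked(tb, remote_uuid);
	if (!xd)
		return -ENODEV;

	/* ... talk to the remote domain here ... */

	tb_xdomain_put(xd);	/* drop the reference taken by the lookup */
	return 0;
}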
1587 int ret = 0; in tb_xdomain_handle_request()
1621 return ret > 0; in tb_xdomain_handle_request()
1626 struct tb_xdomain *xd; in update_xdomain() local
1628 xd = tb_to_xdomain(dev); in update_xdomain()
1629 if (xd) { in update_xdomain()
1630 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, in update_xdomain()
1634 return 0; in update_xdomain()
1665 * Return: %0 on success and negative errno on failure
1696 return 0; in tb_register_property_dir()
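tb_register_property_dir() is how a service driver advertises itself in the local host's XDomain property block. A sketch of the usual pattern; the directory UUID, the "myservice" key, and the property values are placeholders (real protocols publish well-known ones):

/* Placeholder directory UUID; a real protocol uses its own well-known UUID. */
static const uuid_t my_dir_uuid =
	UUID_INIT(0x00000000, 0x0000, 0x0000,
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);

static struct tb_property_dir *my_dir;

static int my_svc_publish_properties(void)
{
	int ret;

	my_dir = tb_property_create_dir(&my_dir_uuid);
	if (!my_dir)
		return -ENOMEM;

	/* Standard service-matching keys; values here are illustrative. */
	tb_property_add_immediate(my_dir, "prtcid", 1);
	tb_property_add_immediate(my_dir, "prtcvers", 1);
	tb_property_add_immediate(my_dir, "prtcrevs", 1);
	tb_property_add_immediate(my_dir, "prtcstns", 0);

	/* Adding the directory regenerates the block and notifies peers. */
	ret = tb_register_property_dir("myservice", my_dir);
	if (ret)
		tb_property_free_dir(my_dir);
	return ret;
}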
1714 int ret = 0; in tb_unregister_property_dir()
1742 tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1); in tb_xdomain_init()
1743 tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100); in tb_xdomain_init()
1745 return 0; in tb_xdomain_init()