Lines matching full:device (drivers/dma/dmaengine.c)

26 * Each device has a channels list, which runs unlocked but is never modified
27 * once the device is registered, it's just setup by the driver.
39 #include <linux/device.h>
149 * dev_to_dma_chan - convert a device pointer to its sysfs container object
150 * @dev: device node
154 static struct dma_chan *dev_to_dma_chan(struct device *dev) in dev_to_dma_chan()
158 chan_dev = container_of(dev, typeof(*chan_dev), device); in dev_to_dma_chan()
162 static ssize_t memcpy_count_show(struct device *dev, in memcpy_count_show()
184 static ssize_t bytes_transferred_show(struct device *dev, in bytes_transferred_show()
206 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr, in in_use_show()
232 static void chan_dev_release(struct device *dev) in chan_dev_release()
236 chan_dev = container_of(dev, typeof(*chan_dev), device); in chan_dev_release()
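The dev_to_dma_chan() and chan_dev_release() hits above both recover the wrapping dma_chan_dev from an embedded struct device via container_of(). A minimal sketch of that idiom, using hypothetical foo_* names rather than the dmaengine structures:

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical wrapper embedding a struct device, mirroring struct dma_chan_dev. */
struct foo_chan_dev {
	int chan_id;
	struct device device;		/* embedded object, not a pointer */
};

/* Recover the wrapper from its embedded member, as dev_to_dma_chan() does. */
static struct foo_chan_dev *dev_to_foo_chan_dev(struct device *dev)
{
	return container_of(dev, struct foo_chan_dev, device);
}

/* Release callback bound to the embedded device, as chan_dev_release() does. */
static void foo_chan_dev_release(struct device *dev)
{
	kfree(dev_to_foo_chan_dev(dev));
}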
246 /* --- client and device registration --- */
304 int node = dev_to_node(chan->device->dev); in dma_chan_is_local()
322 struct dma_device *device; in min_chan() local
327 list_for_each_entry(device, &dma_device_list, global_node) { in min_chan()
328 if (!dma_has_cap(cap, device->cap_mask) || in min_chan()
329 dma_has_cap(DMA_PRIVATE, device->cap_mask)) in min_chan()
331 list_for_each_entry(chan, &device->channels, device_node) { in min_chan()
364 struct dma_device *device; in dma_channel_rebalance() local
373 list_for_each_entry(device, &dma_device_list, global_node) { in dma_channel_rebalance()
374 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dma_channel_rebalance()
376 list_for_each_entry(chan, &device->channels, device_node) in dma_channel_rebalance()
392 static int dma_device_satisfies_mask(struct dma_device *device, in dma_device_satisfies_mask() argument
397 bitmap_and(has.bits, want->bits, device->cap_mask.bits, in dma_device_satisfies_mask()
404 return chan->device->owner; in dma_chan_to_owner()
425 struct dma_device *device = container_of(ref, struct dma_device, ref); in dma_device_release() local
427 list_del_rcu(&device->global_node); in dma_device_release()
430 if (device->device_release) in dma_device_release()
431 device->device_release(device); in dma_device_release()
434 static void dma_device_put(struct dma_device *device) in dma_device_put() argument
437 kref_put(&device->ref, dma_device_release); in dma_device_put()
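dma_device_release()/dma_device_put() above, together with the kref_get_unless_zero() call in dma_chan_get() below, follow the standard kref lifetime pattern. A minimal sketch of that pattern with hypothetical foo_* names, not the dmaengine code itself:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo_device {
	struct kref ref;	/* initialised with kref_init() at allocation time */
	/* ... */
};

static void foo_device_release(struct kref *ref)
{
	struct foo_device *fd = container_of(ref, struct foo_device, ref);

	kfree(fd);		/* last reference dropped, free the object */
}

/* Take a reference only if the object is still live (count > 0). */
static bool foo_device_get(struct foo_device *fd)
{
	return kref_get_unless_zero(&fd->ref);
}

/* Drop a reference; the release callback runs when the count hits zero. */
static void foo_device_put(struct foo_device *fd)
{
	kref_put(&fd->ref, foo_device_release);
}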
460 ret = kref_get_unless_zero(&chan->device->ref); in dma_chan_get()
467 if (chan->device->device_alloc_chan_resources) { in dma_chan_get()
468 ret = chan->device->device_alloc_chan_resources(chan); in dma_chan_get()
473 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) in dma_chan_get()
481 dma_device_put(chan->device); in dma_chan_get()
502 if (!chan->client_count && chan->device->device_free_chan_resources) { in dma_chan_put()
505 chan->device->device_free_chan_resources(chan); in dma_chan_put()
515 dma_device_put(chan->device); in dma_chan_put()
528 dev_err(chan->device->dev, "%s: timeout!\n", __func__); in dma_sync_wait()
555 struct dma_device *device; in dma_issue_pending_all() local
559 list_for_each_entry_rcu(device, &dma_device_list, global_node) { in dma_issue_pending_all()
560 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dma_issue_pending_all()
562 list_for_each_entry(chan, &device->channels, device_node) in dma_issue_pending_all()
564 device->device_issue_pending(chan); in dma_issue_pending_all()
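dma_issue_pending_all() above walks the global dma_device_list with list_for_each_entry_rcu(); in dmaengine.c that loop runs inside rcu_read_lock()/rcu_read_unlock(). A minimal sketch of the same read-side pattern with a hypothetical list:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct foo_device {
	struct list_head global_node;
	/* ... */
};

static LIST_HEAD(foo_device_list);	/* writers use list_add_tail_rcu()/list_del_rcu() */

static void foo_poke_all(void)
{
	struct foo_device *fd;

	rcu_read_lock();		/* readers only need the RCU read-side lock */
	list_for_each_entry_rcu(fd, &foo_device_list, global_node) {
		/* act on each registered device */
	}
	rcu_read_unlock();
}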
572 struct dma_device *device; in dma_get_slave_caps() local
577 device = chan->device; in dma_get_slave_caps()
580 if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || in dma_get_slave_caps()
581 test_bit(DMA_CYCLIC, device->cap_mask.bits))) in dma_get_slave_caps()
589 if (!device->directions) in dma_get_slave_caps()
592 caps->src_addr_widths = device->src_addr_widths; in dma_get_slave_caps()
593 caps->dst_addr_widths = device->dst_addr_widths; in dma_get_slave_caps()
594 caps->directions = device->directions; in dma_get_slave_caps()
595 caps->min_burst = device->min_burst; in dma_get_slave_caps()
596 caps->max_burst = device->max_burst; in dma_get_slave_caps()
597 caps->max_sg_burst = device->max_sg_burst; in dma_get_slave_caps()
598 caps->residue_granularity = device->residue_granularity; in dma_get_slave_caps()
599 caps->descriptor_reuse = device->descriptor_reuse; in dma_get_slave_caps()
600 caps->cmd_pause = !!device->device_pause; in dma_get_slave_caps()
601 caps->cmd_resume = !!device->device_resume; in dma_get_slave_caps()
602 caps->cmd_terminate = !!device->device_terminate_all; in dma_get_slave_caps()
605 * DMA engine device might be configured with non-uniformly in dma_get_slave_caps()
606 * distributed slave capabilities per device channels. In this in dma_get_slave_caps()
611 if (device->device_caps) in dma_get_slave_caps()
612 device->device_caps(chan, caps); in dma_get_slave_caps()
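On the consumer side, the fields filled in above are read back through dma_get_slave_caps(). A minimal sketch of a client validating a channel before configuring it (chan is assumed to be an already-requested channel, and the specific checks are illustrative):

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int foo_check_chan(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	/* e.g. require device-to-memory transfers and 4-byte source accesses */
	if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
		return -EINVAL;
	if (!(caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		return -EINVAL;

	return 0;
}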
655 static struct dma_chan *find_candidate(struct dma_device *device, in find_candidate() argument
659 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param); in find_candidate()
668 dma_cap_set(DMA_PRIVATE, device->cap_mask); in find_candidate()
669 device->privatecnt++; in find_candidate()
674 dev_dbg(device->dev, "%s: %s module removed\n", in find_candidate()
676 list_del_rcu(&device->global_node); in find_candidate()
678 dev_dbg(device->dev, in find_candidate()
682 if (--device->privatecnt == 0) in find_candidate()
683 dma_cap_clear(DMA_PRIVATE, device->cap_mask); in find_candidate()
704 struct dma_device *device = chan->device; in dma_get_slave_channel() local
706 dma_cap_set(DMA_PRIVATE, device->cap_mask); in dma_get_slave_channel()
707 device->privatecnt++; in dma_get_slave_channel()
710 dev_dbg(chan->device->dev, in dma_get_slave_channel()
714 if (--device->privatecnt == 0) in dma_get_slave_channel()
715 dma_cap_clear(DMA_PRIVATE, device->cap_mask); in dma_get_slave_channel()
727 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) in dma_get_any_slave_channel() argument
738 chan = find_candidate(device, &mask, NULL, NULL); in dma_get_any_slave_channel()
751 * @np: device node to look for DMA channels
759 struct dma_device *device, *_d; in __dma_request_channel() local
764 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { in __dma_request_channel()
765 /* Finds a DMA controller with matching device node */ in __dma_request_channel()
766 if (np && device->dev->of_node && np != device->dev->of_node) in __dma_request_channel()
769 chan = find_candidate(device, mask, fn, fn_param); in __dma_request_channel()
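__dma_request_channel() above backs the classic dma_request_channel(mask, fn, fn_param) interface: a capability mask plus an optional filter callback. A minimal sketch of a client on that path (the filter logic is hypothetical, and the returned channel must later be handed back with dma_release_channel()):

#include <linux/dmaengine.h>

/* Hypothetical filter: only accept channels belonging to one controller. */
static bool foo_filter(struct dma_chan *chan, void *param)
{
	return chan->device->dev == param;
}

static struct dma_chan *foo_grab_memcpy_chan(struct device *ctrl_dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* A NULL filter accepts any channel advertising the capability. */
	return dma_request_channel(mask, foo_filter, ctrl_dev);
}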
786 static const struct dma_slave_map *dma_filter_match(struct dma_device *device, in dma_filter_match() argument
788 struct device *dev) in dma_filter_match()
792 if (!device->filter.mapcnt) in dma_filter_match()
795 for (i = 0; i < device->filter.mapcnt; i++) { in dma_filter_match()
796 const struct dma_slave_map *map = &device->filter.map[i]; in dma_filter_match()
808 * @dev: pointer to client device structure
813 struct dma_chan *dma_request_chan(struct device *dev, const char *name) in dma_request_chan()
818 /* If device-tree is present get slave info from here */ in dma_request_chan()
822 /* If device was enumerated by ACPI get slave info from here */ in dma_request_chan()
866 if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, in dma_request_chan()
869 if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) in dma_request_chan()
910 if (--chan->device->privatecnt == 0) in dma_release_channel()
911 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); in dma_release_channel()
914 sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); in dma_release_channel()
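dma_request_chan() above is the modern, name-based request used with DT and ACPI bindings, and dma_release_channel() is its counterpart (it also removes the sysfs links created in dma_request_chan()). A minimal usage sketch, error handling abbreviated:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int foo_use_dma(struct device *dev)
{
	struct dma_chan *chan;

	/* "rx" must match a dma-names entry in the device's DT/ACPI description */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	/* ... configure and use the channel ... */

	dma_release_channel(chan);
	return 0;
}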
934 struct dma_device *device, *_d; in dmaengine_get() local
942 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { in dmaengine_get()
943 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dmaengine_get()
945 list_for_each_entry(chan, &device->channels, device_node) { in dmaengine_get()
949 list_del_rcu(&device->global_node); in dmaengine_get()
952 dev_dbg(chan->device->dev, in dmaengine_get()
973 struct dma_device *device, *_d; in dmaengine_put() local
980 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { in dmaengine_put()
981 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dmaengine_put()
983 list_for_each_entry(chan, &device->channels, device_node) in dmaengine_put()
990 static bool device_has_all_tx_types(struct dma_device *device) in device_has_all_tx_types() argument
992 /* A device that satisfies this test has channels that will never cause in device_has_all_tx_types()
997 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) in device_has_all_tx_types()
1002 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) in device_has_all_tx_types()
1007 if (!dma_has_cap(DMA_XOR, device->cap_mask)) in device_has_all_tx_types()
1011 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) in device_has_all_tx_types()
1017 if (!dma_has_cap(DMA_PQ, device->cap_mask)) in device_has_all_tx_types()
1021 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) in device_has_all_tx_types()
1029 static int get_dma_id(struct dma_device *device) in get_dma_id() argument
1035 device->dev_id = rc; in get_dma_id()
1039 static int __dma_async_device_channel_register(struct dma_device *device, in __dma_async_device_channel_register() argument
1057 mutex_lock(&device->chan_mutex); in __dma_async_device_channel_register()
1058 chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); in __dma_async_device_channel_register()
1059 mutex_unlock(&device->chan_mutex); in __dma_async_device_channel_register()
1067 chan->dev->device.class = &dma_devclass; in __dma_async_device_channel_register()
1068 chan->dev->device.parent = device->dev; in __dma_async_device_channel_register()
1070 chan->dev->dev_id = device->dev_id; in __dma_async_device_channel_register()
1071 dev_set_name(&chan->dev->device, "dma%dchan%d", in __dma_async_device_channel_register()
1072 device->dev_id, chan->chan_id); in __dma_async_device_channel_register()
1073 rc = device_register(&chan->dev->device); in __dma_async_device_channel_register()
1077 device->chancnt++; in __dma_async_device_channel_register()
1082 mutex_lock(&device->chan_mutex); in __dma_async_device_channel_register()
1083 ida_free(&device->chan_ida, chan->chan_id); in __dma_async_device_channel_register()
1084 mutex_unlock(&device->chan_mutex); in __dma_async_device_channel_register()
1092 int dma_async_device_channel_register(struct dma_device *device, in dma_async_device_channel_register() argument
1097 rc = __dma_async_device_channel_register(device, chan); in dma_async_device_channel_register()
1106 static void __dma_async_device_channel_unregister(struct dma_device *device, in __dma_async_device_channel_unregister() argument
1109 WARN_ONCE(!device->device_release && chan->client_count, in __dma_async_device_channel_unregister()
1114 device->chancnt--; in __dma_async_device_channel_unregister()
1117 mutex_lock(&device->chan_mutex); in __dma_async_device_channel_unregister()
1118 ida_free(&device->chan_ida, chan->chan_id); in __dma_async_device_channel_unregister()
1119 mutex_unlock(&device->chan_mutex); in __dma_async_device_channel_unregister()
1120 device_unregister(&chan->dev->device); in __dma_async_device_channel_unregister()
1124 void dma_async_device_channel_unregister(struct dma_device *device, in dma_async_device_channel_unregister() argument
1127 __dma_async_device_channel_unregister(device, chan); in dma_async_device_channel_unregister()
1134 * @device: pointer to &struct dma_device
1140 int dma_async_device_register(struct dma_device *device) in dma_async_device_register() argument
1145 if (!device) in dma_async_device_register()
1148 /* validate device routines */ in dma_async_device_register()
1149 if (!device->dev) { in dma_async_device_register()
1154 device->owner = device->dev->driver->owner; in dma_async_device_register()
1156 if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { in dma_async_device_register()
1157 dev_err(device->dev, in dma_async_device_register()
1158 "Device claims capability %s, but op is not defined\n", in dma_async_device_register()
1163 if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) { in dma_async_device_register()
1164 dev_err(device->dev, in dma_async_device_register()
1165 "Device claims capability %s, but op is not defined\n", in dma_async_device_register()
1170 if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) { in dma_async_device_register()
1171 dev_err(device->dev, in dma_async_device_register()
1172 "Device claims capability %s, but op is not defined\n", in dma_async_device_register()
1177 if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) { in dma_async_device_register()
1178 dev_err(device->dev, in dma_async_device_register()
1179 "Device claims capability %s, but op is not defined\n", in dma_async_device_register()
1184 if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) { in dma_async_device_register()
1185 dev_err(device->dev, in dma_async_device_register()
1186 "Device claims capability %s, but op is not defined\n", in dma_async_device_register()
1191 if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) { in dma_async_device_register()
1192 dev_err(device->dev, in dma_async_device_register()
1193 "Device claims capability %s, but op is not defined\n", in dma_async_device_register()
1198 if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) { in dma_async_device_register()
1199 dev_err(device->dev, in dma_async_device_register()
1200 "Device claims capability %s, but op is not defined\n", in dma_async_device_register()
1205 if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) { in dma_async_device_register()
1206 dev_err(device->dev, in dma_async_device_register()
1207 "Device claims capability %s, but op is not defined\n", in dma_async_device_register()
1212 if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) { in dma_async_device_register()
1213 dev_err(device->dev, in dma_async_device_register()
1214 "Device claims capability %s, but op is not defined\n", in dma_async_device_register()
1220 if (!device->device_tx_status) { in dma_async_device_register()
1221 dev_err(device->dev, "Device tx_status is not defined\n"); in dma_async_device_register()
1226 if (!device->device_issue_pending) { in dma_async_device_register()
1227 dev_err(device->dev, "Device issue_pending is not defined\n"); in dma_async_device_register()
1231 if (!device->device_release) in dma_async_device_register()
1232 dev_dbg(device->dev, in dma_async_device_register()
1233 "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); in dma_async_device_register()
1235 kref_init(&device->ref); in dma_async_device_register()
1240 if (device_has_all_tx_types(device)) in dma_async_device_register()
1241 dma_cap_set(DMA_ASYNC_TX, device->cap_mask); in dma_async_device_register()
1243 rc = get_dma_id(device); in dma_async_device_register()
1247 mutex_init(&device->chan_mutex); in dma_async_device_register()
1248 ida_init(&device->chan_ida); in dma_async_device_register()
1251 list_for_each_entry(chan, &device->channels, device_node) { in dma_async_device_register()
1252 rc = __dma_async_device_channel_register(device, chan); in dma_async_device_register()
1259 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dma_async_device_register()
1260 list_for_each_entry(chan, &device->channels, device_node) { in dma_async_device_register()
1274 list_add_tail_rcu(&device->global_node, &dma_device_list); in dma_async_device_register()
1275 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dma_async_device_register()
1276 device->privatecnt++; /* Always private */ in dma_async_device_register()
1280 dmaengine_debug_register(device); in dma_async_device_register()
1286 if (!device->chancnt) { in dma_async_device_register()
1287 ida_free(&dma_ida, device->dev_id); in dma_async_device_register()
1291 list_for_each_entry(chan, &device->channels, device_node) { in dma_async_device_register()
1297 device_unregister(&chan->dev->device); in dma_async_device_register()
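Taken together, the checks in dma_async_device_register() spell out the provider-side contract: every capability bit set in cap_mask needs the matching device_prep_* callback, device_tx_status and device_issue_pending are always mandatory, and device_release is strongly recommended so the driver can be unbound safely. A minimal registration skeleton with hypothetical foo_* callbacks (bodies omitted), not a complete driver:

#include <linux/dmaengine.h>

struct foo_dma {
	struct dma_device ddev;
	struct dma_chan chan;
};

/* Hypothetical callbacks implemented elsewhere in the driver; bodies omitted. */
static struct dma_async_tx_descriptor *foo_prep_memcpy(struct dma_chan *chan,
		dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags);
static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *state);
static void foo_issue_pending(struct dma_chan *chan);
static void foo_release(struct dma_device *ddev);

static int foo_register(struct foo_dma *fd, struct device *dev)
{
	struct dma_device *ddev = &fd->ddev;

	ddev->dev = dev;
	dma_cap_zero(ddev->cap_mask);
	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);

	/* DMA_MEMCPY in cap_mask requires device_prep_dma_memcpy */
	ddev->device_prep_dma_memcpy = foo_prep_memcpy;
	ddev->device_tx_status = foo_tx_status;			/* mandatory */
	ddev->device_issue_pending = foo_issue_pending;		/* mandatory */
	ddev->device_release = foo_release;			/* safe unbind */

	INIT_LIST_HEAD(&ddev->channels);
	fd->chan.device = ddev;
	list_add_tail(&fd->chan.device_node, &ddev->channels);

	return dma_async_device_register(ddev);
}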
1305 * dma_async_device_unregister - unregister a DMA device
1306 * @device: pointer to &struct dma_device
1311 void dma_async_device_unregister(struct dma_device *device) in dma_async_device_unregister() argument
1315 dmaengine_debug_unregister(device); in dma_async_device_unregister()
1317 list_for_each_entry_safe(chan, n, &device->channels, device_node) in dma_async_device_unregister()
1318 __dma_async_device_channel_unregister(device, chan); in dma_async_device_unregister()
1322 * setting DMA_PRIVATE ensures the device being torn down will not in dma_async_device_unregister()
1325 dma_cap_set(DMA_PRIVATE, device->cap_mask); in dma_async_device_unregister()
1327 ida_free(&dma_ida, device->dev_id); in dma_async_device_unregister()
1328 dma_device_put(device); in dma_async_device_unregister()
1333 static void dmam_device_release(struct device *dev, void *res) in dmam_device_release()
1335 struct dma_device *device; in dmam_device_release() local
1337 device = *(struct dma_device **)res; in dmam_device_release()
1338 dma_async_device_unregister(device); in dmam_device_release()
1343 * @device: pointer to &struct dma_device
1347 int dmaenginem_async_device_register(struct dma_device *device) in dmaenginem_async_device_register() argument
1356 ret = dma_async_device_register(device); in dmaenginem_async_device_register()
1358 *(struct dma_device **)p = device; in dmaenginem_async_device_register()
1359 devres_add(device->dev, p); in dmaenginem_async_device_register()
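dmam_device_release() and dmaenginem_async_device_register() above tie the unregister step to the provider's struct device through devres, so it runs automatically when the driver is unbound. The same pattern fits any teardown that should follow driver detach; a minimal generic sketch with hypothetical foo_* names:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct foo_ctx;
static void foo_ctx_teardown(struct foo_ctx *ctx);	/* body omitted */

static void foo_ctx_devres_release(struct device *dev, void *res)
{
	/* runs automatically when 'dev' is unbound from its driver */
	foo_ctx_teardown(*(struct foo_ctx **)res);
}

static int foo_ctx_register_managed(struct device *dev, struct foo_ctx *ctx)
{
	struct foo_ctx **p;

	p = devres_alloc(foo_ctx_devres_release, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	*p = ctx;
	devres_add(dev, p);	/* devres now owns running the release */
	return 0;
}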
1409 struct device *dev = unmap->dev; in dmaengine_unmap()
1480 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) in dmaengine_get_unmap_data()
1594 dev_err(tx->chan->device->dev, in dma_wait_for_async_tx()
1642 chan->device->device_issue_pending(chan); in dma_run_dependencies()