/linux/drivers/crypto/virtio/virtio_crypto_mgr.c, matches in virtcrypto_get_dev_node():

    151  struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;    (local)
    156  list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
    158          if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
    159               dev_to_node(&tmp_dev->vdev->dev) < 0) &&
    160              virtcrypto_dev_started(tmp_dev) &&
    161              virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
    162                  ctr = atomic_read(&tmp_dev->ref_count);
    164                  vcrypto_dev = tmp_dev;
    174  list_for_each_entry(tmp_dev,
    176          if (virtcrypto_dev_started(tmp_dev)
    [all...]
/linux/drivers/crypto/intel/qat/qat_common/qat_crypto.c, matches in qat_crypto_get_instance_node():

    53   struct adf_accel_dev *accel_dev = NULL, *tmp_dev;      (local)
    57   list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
    60           if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
    61                dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
    62               adf_dev_started(tmp_dev) &&
    63               !list_empty(&tmp_dev->crypto_list)) {
    64                   ctr = atomic_read(&tmp_dev->ref_count);
    66                   accel_dev = tmp_dev;
    75   list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
    76           if (adf_dev_started(tmp_dev)
    [all...]
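Both lookups above implement the same selection policy: walk the global device list, treat a device as node-local when dev_to_node() equals the caller's NUMA node or is negative (no affinity), require the device to be started, and keep the candidate with the smallest ref_count; the second loop (lines 75/174 onward) retries without the node restriction when the first pass finds nothing. A minimal userspace sketch of that policy, with a hypothetical struct fake_dev and a plain int standing in for the kernel structures and atomic_t:

    #include <stddef.h>

    /* Hypothetical stand-in for the kernel's per-device state. */
    struct fake_dev {
        struct fake_dev *next;   /* device list linkage */
        int node;                /* NUMA node; < 0 means no affinity */
        int started;             /* device is up and usable */
        int ref_count;           /* current users (atomic_t in the kernel) */
    };

    /*
     * Two-pass pick: prefer the least-referenced started device on `node`
     * (devices without affinity count as local), else any started device.
     */
    static struct fake_dev *pick_least_loaded(struct fake_dev *devs, int node)
    {
        struct fake_dev *best = NULL;
        struct fake_dev *d;

        for (d = devs; d; d = d->next)
            if ((d->node == node || d->node < 0) && d->started &&
                (!best || d->ref_count < best->ref_count))
                best = d;

        if (!best)
            for (d = devs; d; d = d->next)
                if (d->started &&
                    (!best || d->ref_count < best->ref_count))
                    best = d;

        return best;
    }

In the drivers the winner's reference count is incremented only after the scan, so under concurrent lookups the "least referenced" pick is a load-balancing heuristic, not a guarantee.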
/linux/drivers/md/md-linear.c, matches in linear_make_request():

    234  struct dev_info *tmp_dev;                              (local)
    242  tmp_dev = which_dev(mddev, bio_sector);
    243  start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
    244  end_sector = tmp_dev->end_sector;
    245  data_offset = tmp_dev->rdev->data_offset;
    251  if (unlikely(is_rdev_broken(tmp_dev->rdev))) {
    252          md_error(mddev, tmp_dev->rdev);
    274  bio_set_dev(bio, tmp_dev->rdev->bdev);
    295  tmp_dev
    [all...]
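md-linear concatenates its members, so routing a bio is interval arithmetic: which_dev() finds the dev_info whose cumulative end_sector covers the bio's sector, and the member's start is recovered as end_sector minus the member's size (line 243 above). A self-contained sketch of that lookup, with a hypothetical dev_info array and a linear scan standing in for the kernel's lookup:

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t sector_t;

    struct dev_info {            /* hypothetical stand-in */
        sector_t end_sector;     /* cumulative end of this member */
        sector_t sectors;        /* size of this member */
    };

    /* Return the member whose [end_sector - sectors, end_sector) range
     * contains `sector`; members are sorted by end_sector. */
    static struct dev_info *which_dev(struct dev_info *devs, int cnt,
                                      sector_t sector)
    {
        for (int i = 0; i < cnt; i++)
            if (sector < devs[i].end_sector)
                return &devs[i];
        return NULL;             /* sector lies past the end of the array */
    }

Given the hit, the member-relative sector is sector - (end_sector - sectors), which is then shifted by the member's data_offset before the bio is pointed at the underlying block device with bio_set_dev().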
/linux/drivers/md/raid0.c, matches in raid0_map_submit_bio():

    557  struct md_rdev *tmp_dev;                               (local)
    566  tmp_dev = map_sector(mddev, zone, bio_sector, &sector);
    569  tmp_dev = map_sector(mddev, zone, sector, &sector);
    577  if (unlikely(is_rdev_broken(tmp_dev))) {
    579          md_error(mddev, tmp_dev);
    583  bio_set_dev(bio, tmp_dev->bdev);
    585          tmp_dev->data_offset;
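map_sector() itself is not in the excerpt, but for a single-zone array with a uniform chunk size the translation it performs reduces to the classic striping formula. A sketch under that assumption (not the kernel code, which also handles multiple zones of unequal width):

    typedef unsigned long long sector_t;

    /*
     * Single-zone striping: split the array sector into (chunk, offset),
     * spread chunks round-robin across ndisks, and return the member index
     * plus the member-relative sector through *dev_sector.
     */
    static int map_sector_simple(sector_t array_sector, sector_t chunk_sects,
                                 int ndisks, sector_t *dev_sector)
    {
        sector_t chunk = array_sector / chunk_sects;
        sector_t offset = array_sector % chunk_sects;
        int disk = (int)(chunk % ndisks);

        *dev_sector = (chunk / ndisks) * chunk_sects + offset;
        return disk;
    }

As in raid0_map_submit_bio(), the caller would then retarget the bio at the chosen member and add that member's data_offset.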
/linux/drivers/firmware/arm_ffa/bus.c, matches in ffa_device_is_valid():

    174  struct ffa_device *tmp_dev;                            (local)
    178  tmp_dev = to_ffa_dev(dev);
    179  if (tmp_dev == ffa_dev) {
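Judging from the excerpt, ffa_device_is_valid() treats a pointer as valid exactly when it matches a device enumerated from the ffa bus, i.e. it is a membership test by pointer identity. A toy sketch of that test over a hypothetical bus list:

    #include <stdbool.h>
    #include <stddef.h>

    struct fake_dev { struct fake_dev *next; };
    struct fake_bus { struct fake_dev *devs; };

    /* True iff `cand` is one of the devices currently on the bus. */
    static bool dev_is_registered(const struct fake_bus *bus,
                                  const struct fake_dev *cand)
    {
        for (const struct fake_dev *d = bus->devs; d; d = d->next)
            if (d == cand)
                return true;
        return false;
    }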
/linux/drivers/pcmcia/ds.c, matches in pcmcia_get_dev() and pcmcia_device_add():

    217  struct device *tmp_dev;                                (local)
    218  tmp_dev = get_device(&p_dev->dev);
    219  if (!tmp_dev)
    221  return to_pcmcia_dev(tmp_dev);

    482  struct pcmcia_device *p_dev, *tmp_dev;                 (local)
    528  list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
    529          if (p_dev->func == tmp_dev->func) {
    530                  p_dev->function_config = tmp_dev->function_config;
    531                  p_dev->irq = tmp_dev->irq;
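pcmcia_get_dev() is the usual "typed get" wrapper: take a reference on the embedded struct device, propagate a NULL result from get_device() rather than handing back a dangling typed pointer, and convert the generic pointer back to the containing pcmcia_device (to_pcmcia_dev() is a container_of() cast). A userspace sketch of the shape of this idiom, with toy types and a plain counter in place of the kobject machinery:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct device { int refcount; };              /* toy stand-in */
    struct pcmcia_device { struct device dev; };  /* embeds the device */

    /* Toy get_device(): NULL-safe reference grab. */
    static struct device *get_device(struct device *dev)
    {
        if (!dev)
            return NULL;
        dev->refcount++;
        return dev;
    }

    static struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev)
    {
        struct device *tmp_dev = get_device(&p_dev->dev);

        if (!tmp_dev)
            return NULL;
        return container_of(tmp_dev, struct pcmcia_device, dev);
    }

The second hit block (lines 482-531) is unrelated to refcounting: when a new function of a multi-function card is added, it inherits function_config and irq from an already-registered sibling with the same function number.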
/linux/drivers/s390/block/dasd_devmap.c, matches in dasd_devmap_check_copy_relation():

    663  struct dasd_device *tmp_dev;                           (local)
    714  tmp_dev = device;
    716  tmp_dev = copy->entry[j].device;
    718  if (!tmp_dev)
    721  if (dasd_devmap_get_pprc_status(tmp_dev, &tmp_dat))
    725  dev_warn(&tmp_dev->cdev->dev,
/linux/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c, matches in __mlx5_lag_dev_add_mdev():

    1379 struct mlx5_core_dev *tmp_dev;                         (local)
    1381 tmp_dev = mlx5_devcom_get_next_peer_data(dev->priv.hca_devcom_comp, &pos);
    1382 if (tmp_dev)
    1383         ldev = mlx5_lag_dev(tmp_dev);
/linux/drivers/iommu/intel/dmar.c, matches in dmar_free_dev_scope():

    108  struct device *tmp_dev;                                (local)
    111  for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
    112          put_device(tmp_dev);
/linux/drivers/target/target_core_user.c, matches in check_timedout_devices():

    3265 struct tcmu_dev *udev, *tmp_dev;                       (local)
    3272 list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
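check_timedout_devices() iterates with list_for_each_entry_safe() because the loop body may remove udev from the list; the _safe variant keeps the successor cached in the second cursor (tmp_dev here) so deleting the current entry cannot break the walk. The same idea spelled out over a plain singly linked list, a sketch with a hypothetical struct node and free() standing in for the teardown work:

    #include <stdlib.h>

    struct node {
        struct node *next;
        int timed_out;
    };

    /*
     * Delete timed-out nodes during the walk.  The successor is read
     * before the current node can be freed, which is exactly what
     * list_for_each_entry_safe() does with its second cursor.
     */
    static void reap_timedout(struct node **head)
    {
        struct node **pp = head;

        while (*pp) {
            struct node *cur = *pp;
            struct node *next = cur->next;  /* cached before any free */

            if (cur->timed_out) {
                *pp = next;   /* unlink cur */
                free(cur);    /* cur is dead; `next` is still valid */
            } else {
                pp = &cur->next;
            }
        }
    }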