Lines matching "bus-range" in drivers/base/regmap/regmap.c

1 // SPDX-License-Identifier: GPL-2.0
38 return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
78 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
82 if (!table->n_yes_ranges)
85 return regmap_reg_in_ranges(reg, table->yes_ranges,
86 table->n_yes_ranges);
92 if (map->max_register_is_set && reg > map->max_register)
95 if (map->writeable_reg)
96 return map->writeable_reg(map->dev, reg);
98 if (map->wr_table)
99 return regmap_check_range_table(map, reg, map->wr_table);
109 if (map->cache_type == REGCACHE_NONE)
112 if (!map->cache_ops)
115 if (map->max_register_is_set && reg > map->max_register)
118 map->lock(map->lock_arg);
120 map->unlock(map->lock_arg);
129 if (!map->reg_read)
132 if (map->max_register_is_set && reg > map->max_register)
135 if (map->format.format_write)
138 if (map->readable_reg)
139 return map->readable_reg(map->dev, reg);
141 if (map->rd_table)
142 return regmap_check_range_table(map, reg, map->rd_table);
149 if (!map->format.format_write && !regmap_readable(map, reg))
152 if (map->volatile_reg)
153 return map->volatile_reg(map->dev, reg);
155 if (map->volatile_table)
156 return regmap_check_range_table(map, reg, map->volatile_table);
158 if (map->cache_ops)
169 if (map->precious_reg)
170 return map->precious_reg(map->dev, reg);
172 if (map->precious_table)
173 return regmap_check_range_table(map, reg, map->precious_table);
180 if (map->writeable_noinc_reg)
181 return map->writeable_noinc_reg(map->dev, reg);
183 if (map->wr_noinc_table)
184 return regmap_check_range_table(map, reg, map->wr_noinc_table);
191 if (map->readable_noinc_reg)
192 return map->readable_noinc_reg(map->dev, reg);
194 if (map->rd_noinc_table)
195 return regmap_check_range_table(map, reg, map->rd_noinc_table);
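Access rules can come from callbacks (as above) or from range tables. A minimal sketch of a table-based setup, assuming a hypothetical device whose first 32 registers are writeable (the foo_* names are placeholders):

static const struct regmap_range foo_wr_ranges[] = {
	regmap_reg_range(0x00, 0x1f),
};

static const struct regmap_access_table foo_wr_table = {
	.yes_ranges = foo_wr_ranges,
	.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
};

/* wired into the map via the .wr_table member of struct regmap_config */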
215 u8 *out = map->work_buf;
227 u8 *out = map->work_buf;
235 __be16 *out = map->work_buf;
242 __be16 *out = map->work_buf;
249 u8 *out = map->work_buf;
259 u8 *out = map->work_buf;
398 hwspin_lock_timeout(map->hwlock, UINT_MAX);
405 hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
412 hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
413 &map->spinlock_flags);
420 hwspin_unlock(map->hwlock);
427 hwspin_unlock_irq(map->hwlock);
434 hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
445 mutex_lock(&map->mutex);
451 mutex_unlock(&map->mutex);
455 __acquires(&map->spinlock)
460 spin_lock_irqsave(&map->spinlock, flags);
461 map->spinlock_flags = flags;
465 __releases(&map->spinlock)
468 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
472 __acquires(&map->raw_spinlock)
477 raw_spin_lock_irqsave(&map->raw_spinlock, flags);
478 map->raw_spinlock_flags = flags;
482 __releases(&map->raw_spinlock)
485 raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
500 struct rb_root *root = &map->range_tree;
501 struct rb_node **new = &(root->rb_node), *parent = NULL;
508 if (data->range_max < this->range_min)
509 new = &((*new)->rb_left);
510 else if (data->range_min > this->range_max)
511 new = &((*new)->rb_right);
516 rb_link_node(&data->node, parent, new);
517 rb_insert_color(&data->node, root);
525 struct rb_node *node = map->range_tree.rb_node;
531 if (reg < this->range_min)
532 node = node->rb_left;
533 else if (reg > this->range_max)
534 node = node->rb_right;
547 next = rb_first(&map->range_tree);
550 next = rb_next(&range_node->node);
551 rb_erase(&range_node->node, &map->range_tree);
555 kfree(map->selector_work_buf);
560 if (config->name) {
561 const char *name = kstrdup_const(config->name, GFP_KERNEL);
564 return -ENOMEM;
566 kfree_const(map->name);
567 map->name = name;
579 map->dev = dev;
592 return -ENOMEM;
609 dev_get_regmap_match, (void *)map->name);
612 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
618 endian = config->reg_format_endian;
620 /* If the regmap config specified a non-default value, use that */
624 /* Retrieve the endianness specification from the bus config */
625 if (bus && bus->reg_format_endian_default)
626 endian = bus->reg_format_endian_default;
628 /* If the bus specified a non-default value, use that */
637 const struct regmap_bus *bus,
644 endian = config->val_format_endian;
646 /* If the regmap config specified a non-default value, use that */
651 if (fwnode_property_read_bool(fwnode, "big-endian"))
653 else if (fwnode_property_read_bool(fwnode, "little-endian"))
655 else if (fwnode_property_read_bool(fwnode, "native-endian"))
662 /* Retrieve the endianness specification from the bus config */
663 if (bus && bus->val_format_endian_default)
664 endian = bus->val_format_endian_default;
666 /* If the bus specified a non-default value, use that */
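The two helpers above resolve endianness in priority order: an explicit regmap_config value, then the firmware node properties ("big-endian", "little-endian", "native-endian"), then the bus default. A minimal sketch forcing value endianness from the config (foo_config is a placeholder):

static const struct regmap_config foo_config = {
	.reg_bits = 8,
	.val_bits = 16,
	/* non-default value, so the bus default is never consulted */
	.val_format_endian = REGMAP_ENDIAN_BIG,
};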
676 const struct regmap_bus *bus,
683 int ret = -EINVAL;
692 ret = -ENOMEM;
700 ret = -EINVAL; /* Later error paths rely on this */
702 if (config->disable_locking) {
703 map->lock = map->unlock = regmap_lock_unlock_none;
704 map->can_sleep = config->can_sleep;
706 } else if (config->lock && config->unlock) {
707 map->lock = config->lock;
708 map->unlock = config->unlock;
709 map->lock_arg = config->lock_arg;
710 map->can_sleep = config->can_sleep;
711 } else if (config->use_hwlock) {
712 map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
713 if (!map->hwlock) {
714 ret = -ENXIO;
718 switch (config->hwlock_mode) {
720 map->lock = regmap_lock_hwlock_irqsave;
721 map->unlock = regmap_unlock_hwlock_irqrestore;
724 map->lock = regmap_lock_hwlock_irq;
725 map->unlock = regmap_unlock_hwlock_irq;
728 map->lock = regmap_lock_hwlock;
729 map->unlock = regmap_unlock_hwlock;
733 map->lock_arg = map;
735 if ((bus && bus->fast_io) ||
736 config->fast_io) {
737 if (config->use_raw_spinlock) {
738 raw_spin_lock_init(&map->raw_spinlock);
739 map->lock = regmap_lock_raw_spinlock;
740 map->unlock = regmap_unlock_raw_spinlock;
741 lockdep_set_class_and_name(&map->raw_spinlock,
744 spin_lock_init(&map->spinlock);
745 map->lock = regmap_lock_spinlock;
746 map->unlock = regmap_unlock_spinlock;
747 lockdep_set_class_and_name(&map->spinlock,
751 mutex_init(&map->mutex);
752 map->lock = regmap_lock_mutex;
753 map->unlock = regmap_unlock_mutex;
754 map->can_sleep = true;
755 lockdep_set_class_and_name(&map->mutex,
758 map->lock_arg = map;
759 map->lock_key = lock_key;
763 * When we write in fast-paths with regmap_bulk_write() don't allocate
766 if ((bus && bus->fast_io) || config->fast_io)
767 map->alloc_flags = GFP_ATOMIC;
769 map->alloc_flags = GFP_KERNEL;
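fast_io (from either the bus or the config) selects spinlock locking instead of a mutex and, as above, switches bulk-path allocations to GFP_ATOMIC. A minimal sketch, assuming a hypothetical MMIO-style device:

static const struct regmap_config foo_mmio_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.fast_io = true,	/* spinlock locking, GFP_ATOMIC allocations */
};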
771 map->reg_base = config->reg_base;
772 map->reg_shift = config->pad_bits % 8;
774 map->format.pad_bytes = config->pad_bits / 8;
775 map->format.reg_shift = config->reg_shift;
776 map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
777 map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
778 map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
779 if (config->reg_stride)
780 map->reg_stride = config->reg_stride;
782 map->reg_stride = 1;
783 if (is_power_of_2(map->reg_stride))
784 map->reg_stride_order = ilog2(map->reg_stride);
786 map->reg_stride_order = -1;
787 map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
788 map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
789 map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
790 if (bus) {
791 map->max_raw_read = bus->max_raw_read;
792 map->max_raw_write = bus->max_raw_write;
793 } else if (config->max_raw_read && config->max_raw_write) {
794 map->max_raw_read = config->max_raw_read;
795 map->max_raw_write = config->max_raw_write;
797 map->dev = dev;
798 map->bus = bus;
799 map->bus_context = bus_context;
800 map->max_register = config->max_register;
801 map->max_register_is_set = map->max_register ?: config->max_register_is_0;
802 map->wr_table = config->wr_table;
803 map->rd_table = config->rd_table;
804 map->volatile_table = config->volatile_table;
805 map->precious_table = config->precious_table;
806 map->wr_noinc_table = config->wr_noinc_table;
807 map->rd_noinc_table = config->rd_noinc_table;
808 map->writeable_reg = config->writeable_reg;
809 map->readable_reg = config->readable_reg;
810 map->volatile_reg = config->volatile_reg;
811 map->precious_reg = config->precious_reg;
812 map->writeable_noinc_reg = config->writeable_noinc_reg;
813 map->readable_noinc_reg = config->readable_noinc_reg;
814 map->cache_type = config->cache_type;
816 spin_lock_init(&map->async_lock);
817 INIT_LIST_HEAD(&map->async_list);
818 INIT_LIST_HEAD(&map->async_free);
819 init_waitqueue_head(&map->async_waitq);
821 if (config->read_flag_mask ||
822 config->write_flag_mask ||
823 config->zero_flag_mask) {
824 map->read_flag_mask = config->read_flag_mask;
825 map->write_flag_mask = config->write_flag_mask;
826 } else if (bus) {
827 map->read_flag_mask = bus->read_flag_mask;
830 if (config && config->read && config->write) {
831 map->reg_read = _regmap_bus_read;
832 if (config->reg_update_bits)
833 map->reg_update_bits = config->reg_update_bits;
836 map->read = config->read;
837 map->write = config->write;
841 } else if (!bus) {
842 map->reg_read = config->reg_read;
843 map->reg_write = config->reg_write;
844 map->reg_update_bits = config->reg_update_bits;
846 map->defer_caching = false;
848 } else if (!bus->read || !bus->write) {
849 map->reg_read = _regmap_bus_reg_read;
850 map->reg_write = _regmap_bus_reg_write;
851 map->reg_update_bits = bus->reg_update_bits;
853 map->defer_caching = false;
856 map->reg_read = _regmap_bus_read;
857 map->reg_update_bits = bus->reg_update_bits;
859 map->read = bus->read;
860 map->write = bus->write;
862 reg_endian = regmap_get_reg_endian(bus, config);
863 val_endian = regmap_get_val_endian(dev, bus, config);
866 switch (config->reg_bits + map->reg_shift) {
868 switch (config->val_bits) {
870 map->format.format_write = regmap_format_2_6_write;
878 switch (config->val_bits) {
880 map->format.format_write = regmap_format_4_12_write;
888 switch (config->val_bits) {
890 map->format.format_write = regmap_format_7_9_write;
893 map->format.format_write = regmap_format_7_17_write;
901 switch (config->val_bits) {
903 map->format.format_write = regmap_format_10_14_write;
911 switch (config->val_bits) {
913 map->format.format_write = regmap_format_12_20_write;
921 map->format.format_reg = regmap_format_8;
927 map->format.format_reg = regmap_format_16_be;
930 map->format.format_reg = regmap_format_16_le;
933 map->format.format_reg = regmap_format_16_native;
943 map->format.format_reg = regmap_format_24_be;
953 map->format.format_reg = regmap_format_32_be;
956 map->format.format_reg = regmap_format_32_le;
959 map->format.format_reg = regmap_format_32_native;
971 map->format.parse_inplace = regmap_parse_inplace_noop;
973 switch (config->val_bits) {
975 map->format.format_val = regmap_format_8;
976 map->format.parse_val = regmap_parse_8;
977 map->format.parse_inplace = regmap_parse_inplace_noop;
982 map->format.format_val = regmap_format_16_be;
983 map->format.parse_val = regmap_parse_16_be;
984 map->format.parse_inplace = regmap_parse_16_be_inplace;
987 map->format.format_val = regmap_format_16_le;
988 map->format.parse_val = regmap_parse_16_le;
989 map->format.parse_inplace = regmap_parse_16_le_inplace;
992 map->format.format_val = regmap_format_16_native;
993 map->format.parse_val = regmap_parse_16_native;
1002 map->format.format_val = regmap_format_24_be;
1003 map->format.parse_val = regmap_parse_24_be;
1012 map->format.format_val = regmap_format_32_be;
1013 map->format.parse_val = regmap_parse_32_be;
1014 map->format.parse_inplace = regmap_parse_32_be_inplace;
1017 map->format.format_val = regmap_format_32_le;
1018 map->format.parse_val = regmap_parse_32_le;
1019 map->format.parse_inplace = regmap_parse_32_le_inplace;
1022 map->format.format_val = regmap_format_32_native;
1023 map->format.parse_val = regmap_parse_32_native;
1031 if (map->format.format_write) {
1035 map->use_single_write = true;
1038 if (!map->format.format_write &&
1039 !(map->format.format_reg && map->format.format_val))
1042 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
1043 if (map->work_buf == NULL) {
1044 ret = -ENOMEM;
1048 if (map->format.format_write) {
1049 map->defer_caching = false;
1050 map->reg_write = _regmap_bus_formatted_write;
1051 } else if (map->format.format_val) {
1052 map->defer_caching = true;
1053 map->reg_write = _regmap_bus_raw_write;
1058 map->range_tree = RB_ROOT;
1059 for (i = 0; i < config->num_ranges; i++) {
1060 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
1064 if (range_cfg->range_max < range_cfg->range_min) {
1065 dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
1066 range_cfg->range_max, range_cfg->range_min);
1070 if (range_cfg->range_max > map->max_register) {
1071 dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
1072 range_cfg->range_max, map->max_register);
1076 if (range_cfg->selector_reg > map->max_register) {
1077 dev_err(map->dev,
1078 "Invalid range %d: selector out of map\n", i);
1082 if (range_cfg->window_len == 0) {
1083 dev_err(map->dev, "Invalid range %d: window_len 0\n",
1088 /* Make sure that this register range has no selector
1090 for (j = 0; j < config->num_ranges; j++) {
1091 unsigned int sel_reg = config->ranges[j].selector_reg;
1092 unsigned int win_min = config->ranges[j].window_start;
1094 config->ranges[j].window_len - 1;
1096 /* Allow data window inside its own virtual range */
1100 if (range_cfg->range_min <= sel_reg &&
1101 sel_reg <= range_cfg->range_max) {
1102 dev_err(map->dev,
1103 "Range %d: selector for %d in window\n",
1108 if (!(win_max < range_cfg->range_min ||
1109 win_min > range_cfg->range_max)) {
1110 dev_err(map->dev,
1111 "Range %d: window for %d in window\n",
1119 ret = -ENOMEM;
1123 new->map = map;
1124 new->name = range_cfg->name;
1125 new->range_min = range_cfg->range_min;
1126 new->range_max = range_cfg->range_max;
1127 new->selector_reg = range_cfg->selector_reg;
1128 new->selector_mask = range_cfg->selector_mask;
1129 new->selector_shift = range_cfg->selector_shift;
1130 new->window_start = range_cfg->window_start;
1131 new->window_len = range_cfg->window_len;
1134 dev_err(map->dev, "Failed to add range %d\n", i);
1139 if (map->selector_work_buf == NULL) {
1140 map->selector_work_buf =
1141 kzalloc(map->format.buf_size, GFP_KERNEL);
1142 if (map->selector_work_buf == NULL) {
1143 ret = -ENOMEM;
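The checks above validate indirect (paged) ranges: each range needs a selector register outside every data window and a non-empty window. A minimal sketch of one such range, with all addresses hypothetical:

static const struct regmap_range_cfg foo_ranges[] = {
	{
		.name = "page0",
		.range_min = 0x100,	/* virtual addresses served by this range */
		.range_max = 0x1ff,
		.selector_reg = 0x00,	/* page-select register */
		.selector_mask = 0x03,
		.selector_shift = 0,
		.window_start = 0x10,	/* data window in the direct map */
		.window_len = 0x10,
	},
};
/* referenced from struct regmap_config as .ranges / .num_ranges */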
1167 kfree(map->work_buf);
1169 if (map->hwlock)
1170 hwspin_lock_free(map->hwlock);
1172 kfree_const(map->name);
1176 if (bus && bus->free_on_exit)
1177 kfree(bus);
1188 const struct regmap_bus *bus,
1198 return ERR_PTR(-ENOMEM);
1200 regmap = __regmap_init(dev, bus, bus_context, config,
1216 rm_field->regmap = regmap;
1217 rm_field->reg = reg_field.reg;
1218 rm_field->shift = reg_field.lsb;
1219 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
1221 WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");
1223 rm_field->id_size = reg_field.id_size;
1224 rm_field->id_offset = reg_field.id_offset;
1228 * devm_regmap_field_alloc() - Allocate and initialise a register field.
1244 return ERR_PTR(-ENOMEM);
1255 * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field.
1262 * The return value will be -ENOMEM on error or zero for success.
1276 return -ENOMEM;
1288 * devm_regmap_field_bulk_alloc() - Allocate and initialise a bulk register
1297 * The return value will be -ENOMEM on error or zero for success.
1312 return -ENOMEM;
1324 * regmap_field_bulk_free() - Free register field allocated using
1336 * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
1344 * will be freed as per device-driver life-cycle.
1354 * devm_regmap_field_free() - Free a register field allocated using
1362 * will be freed as per device-driver life-cycle.
1372 * regmap_field_alloc() - Allocate and initialise a register field.
1387 return ERR_PTR(-ENOMEM);
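A minimal sketch of allocating and using a field, assuming a hypothetical enable field in bits 7:4 of register 0x10:

static const struct reg_field foo_enable = REG_FIELD(0x10, 4, 7);

static int foo_setup(struct device *dev, struct regmap *map)
{
	struct regmap_field *f;

	f = devm_regmap_field_alloc(dev, map, foo_enable);
	if (IS_ERR(f))
		return PTR_ERR(f);

	return regmap_field_write(f, 0x3);	/* shifting/masking done for you */
}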
1396 * regmap_field_free() - Free register field allocated using
1408 * regmap_reinit_cache() - Reinitialise the current register cache
1428 map->max_register = config->max_register;
1429 map->max_register_is_set = map->max_register ?: config->max_register_is_0;
1430 map->writeable_reg = config->writeable_reg;
1431 map->readable_reg = config->readable_reg;
1432 map->volatile_reg = config->volatile_reg;
1433 map->precious_reg = config->precious_reg;
1434 map->writeable_noinc_reg = config->writeable_noinc_reg;
1435 map->readable_noinc_reg = config->readable_noinc_reg;
1436 map->cache_type = config->cache_type;
1444 map->cache_bypass = false;
1445 map->cache_only = false;
1452 * regmap_exit() - Free a previously allocated register map
1460 regmap_detach_dev(map->dev, map);
1465 if (map->bus && map->bus->free_context)
1466 map->bus->free_context(map->bus_context);
1467 kfree(map->work_buf);
1468 while (!list_empty(&map->async_free)) {
1469 async = list_first_entry_or_null(&map->async_free,
1472 list_del(&async->list);
1473 kfree(async->work_buf);
1476 if (map->hwlock)
1477 hwspin_lock_free(map->hwlock);
1478 if (map->lock == regmap_lock_mutex)
1479 mutex_destroy(&map->mutex);
1480 kfree_const(map->name);
1481 kfree(map->patch);
1482 if (map->bus && map->bus->free_on_exit)
1483 kfree(map->bus);
1498 return (*r)->name && !strcmp((*r)->name, data);
1504 * dev_get_regmap() - Obtain the regmap (if any) for a device
1527 * regmap_get_device() - Obtain the device from a regmap
1535 return map->dev;
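A minimal sketch of the lookup, assuming the map was registered by the parent device (e.g. an MFD core); passing NULL matches the first map regardless of name:

struct regmap *map = dev_get_regmap(dev->parent, NULL);

if (!map)
	return -ENODEV;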
1540 struct regmap_range_node *range,
1549 win_offset = (*reg - range->range_min) % range->window_len;
1550 win_page = (*reg - range->range_min) / range->window_len;
1553 /* Bulk write shouldn't cross range boundary */
1554 if (*reg + val_num - 1 > range->range_max)
1555 return -EINVAL;
1558 if (val_num > range->window_len - win_offset)
1559 return -EINVAL;
1566 range->window_start + win_offset != range->selector_reg) {
1568 orig_work_buf = map->work_buf;
1569 map->work_buf = map->selector_work_buf;
1571 ret = _regmap_update_bits(map, range->selector_reg,
1572 range->selector_mask,
1573 win_page << range->selector_shift,
1576 map->work_buf = orig_work_buf;
1582 *reg = range->window_start + win_offset;
1593 if (!mask || !map->work_buf)
1596 buf = map->work_buf;
1604 reg += map->reg_base;
1606 if (map->format.reg_shift > 0)
1607 reg >>= map->format.reg_shift;
1608 else if (map->format.reg_shift < 0)
1609 reg <<= -(map->format.reg_shift);
1617 struct regmap_range_node *range;
1619 void *work_val = map->work_buf + map->format.reg_bytes +
1620 map->format.pad_bytes;
1622 int ret = -ENOTSUPP;
1626 /* Check for unwritable or noinc registers in range
1630 for (i = 0; i < val_len / map->format.val_bytes; i++) {
1635 return -EINVAL;
1639 if (!map->cache_bypass && map->format.parse_val) {
1641 int val_bytes = map->format.val_bytes;
1644 i = noinc ? val_len - val_bytes : 0;
1646 ival = map->format.parse_val(val + i);
1650 dev_err(map->dev,
1656 if (map->cache_only) {
1657 map->cache_dirty = true;
1662 range = _regmap_range_lookup(map, reg);
1663 if (range) {
1664 int val_num = val_len / map->format.val_bytes;
1665 int win_offset = (reg - range->range_min) % range->window_len;
1666 int win_residue = range->window_len - win_offset;
1670 dev_dbg(map->dev, "Writing window %d/%zu\n",
1671 win_residue, val_len / map->format.val_bytes);
1674 map->format.val_bytes, noinc);
1679 val_num -= win_residue;
1680 val += win_residue * map->format.val_bytes;
1681 val_len -= win_residue * map->format.val_bytes;
1683 win_offset = (reg - range->range_min) %
1684 range->window_len;
1685 win_residue = range->window_len - win_offset;
1688 ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
1694 map->format.format_reg(map->work_buf, reg, map->reg_shift);
1695 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
1696 map->write_flag_mask);
1703 if (val != work_val && val_len == map->format.val_bytes) {
1704 memcpy(work_val, val, map->format.val_bytes);
1708 if (map->async && map->bus && map->bus->async_write) {
1713 spin_lock_irqsave(&map->async_lock, flags);
1714 async = list_first_entry_or_null(&map->async_free,
1718 list_del(&async->list);
1719 spin_unlock_irqrestore(&map->async_lock, flags);
1722 async = map->bus->async_alloc();
1724 return -ENOMEM;
1726 async->work_buf = kzalloc(map->format.buf_size,
1728 if (!async->work_buf) {
1730 return -ENOMEM;
1734 async->map = map;
1737 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1738 map->format.reg_bytes + map->format.val_bytes);
1740 spin_lock_irqsave(&map->async_lock, flags);
1741 list_add_tail(&async->list, &map->async_list);
1742 spin_unlock_irqrestore(&map->async_lock, flags);
1745 ret = map->bus->async_write(map->bus_context,
1746 async->work_buf,
1747 map->format.reg_bytes +
1748 map->format.pad_bytes,
1751 ret = map->bus->async_write(map->bus_context,
1752 async->work_buf,
1753 map->format.reg_bytes +
1754 map->format.pad_bytes +
1758 dev_err(map->dev, "Failed to schedule write: %d\n",
1761 spin_lock_irqsave(&map->async_lock, flags);
1762 list_move(&async->list, &map->async_free);
1763 spin_unlock_irqrestore(&map->async_lock, flags);
1769 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1776 ret = map->write(map->bus_context, map->work_buf,
1777 map->format.reg_bytes +
1778 map->format.pad_bytes +
1780 else if (map->bus && map->bus->gather_write)
1781 ret = map->bus->gather_write(map->bus_context, map->work_buf,
1782 map->format.reg_bytes +
1783 map->format.pad_bytes,
1786 ret = -ENOTSUPP;
1789 if (ret == -ENOTSUPP) {
1790 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1793 return -ENOMEM;
1795 memcpy(buf, map->work_buf, map->format.reg_bytes);
1796 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1798 ret = map->write(map->bus_context, buf, len);
1801 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
1803 * thus call map->cache_ops->drop() directly
1805 if (map->cache_ops && map->cache_ops->drop)
1806 map->cache_ops->drop(map, reg, reg + 1);
1809 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1815 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1821 return map->write && map->format.format_val && map->format.format_reg;
1826 * regmap_get_raw_read_max - Get the maximum size we can read
1832 return map->max_raw_read;
1837 * regmap_get_raw_write_max - Get the maximum size we can write
1843 return map->max_raw_write;
1851 struct regmap_range_node *range;
1854 WARN_ON(!map->format.format_write);
1856 range = _regmap_range_lookup(map, reg);
1857 if (range) {
1858 ret = _regmap_select_page(map, &reg, range, 1);
1864 map->format.format_write(map, reg, val);
1868 ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
1879 struct regmap_range_node *range;
1882 range = _regmap_range_lookup(map, reg);
1883 if (range) {
1884 ret = _regmap_select_page(map, &reg, range, 1);
1890 return map->bus->reg_write(map->bus_context, reg, val);
1898 WARN_ON(!map->format.format_val);
1900 map->format.format_val(map->work_buf + map->format.reg_bytes
1901 + map->format.pad_bytes, val, 0);
1903 map->work_buf +
1904 map->format.reg_bytes +
1905 map->format.pad_bytes,
1906 map->format.val_bytes,
1912 return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
1922 return -EIO;
1924 if (!map->cache_bypass && !map->defer_caching) {
1928 if (map->cache_only) {
1929 map->cache_dirty = true;
1934 ret = map->reg_write(context, reg, val);
1937 dev_info(map->dev, "%x <= %x\n", reg, val);
1946 * regmap_write() - Write a value to a single register
1959 if (!IS_ALIGNED(reg, map->reg_stride))
1960 return -EINVAL;
1962 map->lock(map->lock_arg);
1966 map->unlock(map->lock_arg);
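A minimal usage sketch; the register address and value are placeholders:

static int foo_enable(struct device *dev, struct regmap *map)
{
	int ret;

	ret = regmap_write(map, 0x04, 0x01);
	if (ret)
		dev_err(dev, "enable write failed: %d\n", ret);
	return ret;
}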
1973 * regmap_write_async() - Write a value to a single register asynchronously
1986 if (!IS_ALIGNED(reg, map->reg_stride))
1987 return -EINVAL;
1989 map->lock(map->lock_arg);
1991 map->async = true;
1995 map->async = false;
1997 map->unlock(map->lock_arg);
2006 size_t val_bytes = map->format.val_bytes;
2013 return -EINVAL;
2015 if (map->use_single_write)
2017 else if (map->max_raw_write && val_len > map->max_raw_write)
2018 chunk_regs = map->max_raw_write / val_bytes;
2031 val_len -= chunk_bytes;
2042 * regmap_raw_write() - Write raw values to one or more registers
2063 return -EINVAL;
2064 if (val_len % map->format.val_bytes)
2065 return -EINVAL;
2067 map->lock(map->lock_arg);
2071 map->unlock(map->lock_arg);
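regmap_raw_write() takes data already laid out in device format. A minimal sketch, assuming a map with 16-bit big-endian values and a placeholder address:

__be16 vals[2] = { cpu_to_be16(0x1234), cpu_to_be16(0xabcd) };
int ret;

/* val_len must be a multiple of format.val_bytes (2 here) */
ret = regmap_raw_write(map, 0x20, vals, sizeof(vals));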
2080 size_t val_bytes = map->format.val_bytes;
2093 lastval = (unsigned int)u8p[val_count - 1];
2098 lastval = (unsigned int)u16p[val_count - 1];
2103 lastval = (unsigned int)u32p[val_count - 1];
2106 return -EINVAL;
2115 if (!map->cache_bypass && !map->defer_caching) {
2119 if (map->cache_only) {
2120 map->cache_dirty = true;
2124 ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
2126 ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
2130 dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
2145 if (i == (val_count - 1))
2164 * The regmap API usually assumes that bulk bus write operations will write a
2165 * range of registers. Some devices have certain registers for which a write
2182 if (!map->write && !(map->bus && map->bus->reg_noinc_write))
2183 return -EINVAL;
2184 if (val_len % map->format.val_bytes)
2185 return -EINVAL;
2186 if (!IS_ALIGNED(reg, map->reg_stride))
2187 return -EINVAL;
2189 return -EINVAL;
2191 map->lock(map->lock_arg);
2194 ret = -EINVAL;
2202 if (map->bus->reg_noinc_write) {
2208 if (map->max_raw_write && map->max_raw_write < val_len)
2209 write_len = map->max_raw_write;
2216 val_len -= write_len;
2220 map->unlock(map->lock_arg);
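A minimal sketch of a non-incrementing write, assuming a hypothetical FIFO data register at 0x40:

static int foo_push_fifo(struct regmap *map, const u8 *buf, size_t len)
{
	/* every byte lands on the same register address */
	return regmap_noinc_write(map, 0x40, buf, len);
}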
2226 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
2246 mask = (mask << field->shift) & field->mask;
2248 return regmap_update_bits_base(field->regmap, field->reg,
2249 mask, val << field->shift,
2255 * regmap_field_test_bits() - Check if all specified bits are set in a
2261 * Returns -1 if the underlying regmap_field_read() fails, 0 if at least one of the
2277 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
2295 if (id >= field->id_size)
2296 return -EINVAL;
2298 mask = (mask << field->shift) & field->mask;
2300 return regmap_update_bits_base(field->regmap,
2301 field->reg + (field->id_offset * id),
2302 mask, val << field->shift,
2308 * regmap_bulk_write() - Write multiple registers to the device
2325 size_t val_bytes = map->format.val_bytes;
2327 if (!IS_ALIGNED(reg, map->reg_stride))
2328 return -EINVAL;
2334 if (!map->write || !map->format.parse_inplace) {
2335 map->lock(map->lock_arg);
2350 ret = -EINVAL;
2361 map->unlock(map->lock_arg);
2365 wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
2367 return -ENOMEM;
2370 map->format.parse_inplace(wval + i);
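regmap_bulk_write() takes native-endian values and converts them in place via parse_inplace, as above. A minimal sketch with a placeholder base address:

u16 coeffs[] = { 0x0001, 0x0010, 0x0100, 0x1000 };	/* CPU endianness */
int ret;

ret = regmap_bulk_write(map, 0x30, coeffs, ARRAY_SIZE(coeffs));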
2399 size_t val_bytes = map->format.val_bytes;
2400 size_t reg_bytes = map->format.reg_bytes;
2401 size_t pad_bytes = map->format.pad_bytes;
2406 return -EINVAL;
2410 return -ENOMEM;
2421 map->format.format_reg(u8, reg, map->reg_shift);
2423 map->format.format_val(u8, val, 0);
2427 *u8 |= map->write_flag_mask;
2429 ret = map->write(map->bus_context, buf, len);
2442 struct regmap_range_node *range)
2444 unsigned int win_page = (reg - range->range_min) / range->window_len;
2467 struct regmap_range_node *range;
2469 range = _regmap_range_lookup(map, reg);
2470 if (range) {
2472 range);
2503 if (map->can_sleep)
2515 range, 1);
2537 if (!map->can_multi_write) {
2544 if (map->can_sleep)
2553 if (!map->format.parse_inplace)
2554 return -EINVAL;
2556 if (map->writeable_reg)
2559 if (!map->writeable_reg(map->dev, reg))
2560 return -EINVAL;
2561 if (!IS_ALIGNED(reg, map->reg_stride))
2562 return -EINVAL;
2565 if (!map->cache_bypass) {
2571 dev_err(map->dev,
2577 if (map->cache_only) {
2578 map->cache_dirty = true;
2583 WARN_ON(!map->bus);
2587 struct regmap_range_node *range;
2592 range = _regmap_range_lookup(map, reg);
2593 if (range || regs[i].delay_us) {
2598 return -ENOMEM;
2610 * regmap_multi_reg_write() - Write multiple registers to the device
2617 * pairs are supplied in any order, possibly not all in a single range.
2620 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2622 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2633 map->lock(map->lock_arg);
2637 map->unlock(map->lock_arg);
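A minimal sketch of a sequence write; the registers, values and delay are placeholders:

static const struct reg_sequence foo_init[] = {
	{ 0x00, 0x01 },
	{ 0x01, 0x40, 100 },	/* wait 100 us after this write */
	{ 0x35, 0x07 },
};

int ret = regmap_multi_reg_write(map, foo_init, ARRAY_SIZE(foo_init));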
2644 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2668 map->lock(map->lock_arg);
2670 bypass = map->cache_bypass;
2671 map->cache_bypass = true;
2675 map->cache_bypass = bypass;
2677 map->unlock(map->lock_arg);
2684 * regmap_raw_write_async() - Write raw values to one or more registers
2697 * If supported by the underlying bus the write will be scheduled
2710 if (val_len % map->format.val_bytes)
2711 return -EINVAL;
2712 if (!IS_ALIGNED(reg, map->reg_stride))
2713 return -EINVAL;
2715 map->lock(map->lock_arg);
2717 map->async = true;
2721 map->async = false;
2723 map->unlock(map->lock_arg);
2732 struct regmap_range_node *range;
2735 if (!map->read)
2736 return -EINVAL;
2738 range = _regmap_range_lookup(map, reg);
2739 if (range) {
2740 ret = _regmap_select_page(map, &reg, range,
2741 noinc ? 1 : val_len / map->format.val_bytes);
2747 map->format.format_reg(map->work_buf, reg, map->reg_shift);
2748 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
2749 map->read_flag_mask);
2750 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2752 ret = map->read(map->bus_context, map->work_buf,
2753 map->format.reg_bytes + map->format.pad_bytes,
2756 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2765 struct regmap_range_node *range;
2768 range = _regmap_range_lookup(map, reg);
2769 if (range) {
2770 ret = _regmap_select_page(map, &reg, range, 1);
2776 return map->bus->reg_read(map->bus_context, reg, val);
2784 void *work_val = map->work_buf + map->format.reg_bytes +
2785 map->format.pad_bytes;
2787 if (!map->format.parse_val)
2788 return -EINVAL;
2790 ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
2792 *val = map->format.parse_val(work_val);
2803 if (!map->cache_bypass) {
2809 if (map->cache_only)
2810 return -EBUSY;
2813 return -EIO;
2815 ret = map->reg_read(context, reg, val);
2818 dev_info(map->dev, "%x => %x\n", reg, *val);
2822 if (!map->cache_bypass)
2830 * regmap_read() - Read a value from a single register
2843 if (!IS_ALIGNED(reg, map->reg_stride))
2844 return -EINVAL;
2846 map->lock(map->lock_arg);
2850 map->unlock(map->lock_arg);
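A minimal usage sketch; 0x08 stands in for a real status register:

unsigned int val;
int ret;

ret = regmap_read(map, 0x08, &val);
if (ret)
	return ret;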
2857 * regmap_read_bypassed() - Read a value from a single register direct
2872 if (!IS_ALIGNED(reg, map->reg_stride))
2873 return -EINVAL;
2875 map->lock(map->lock_arg);
2877 bypass = map->cache_bypass;
2878 cache_only = map->cache_only;
2879 map->cache_bypass = true;
2880 map->cache_only = false;
2884 map->cache_bypass = bypass;
2885 map->cache_only = cache_only;
2887 map->unlock(map->lock_arg);
2894 * regmap_raw_read() - Read raw data from the device
2907 size_t val_bytes = map->format.val_bytes;
2912 if (val_len % map->format.val_bytes)
2913 return -EINVAL;
2914 if (!IS_ALIGNED(reg, map->reg_stride))
2915 return -EINVAL;
2917 return -EINVAL;
2919 map->lock(map->lock_arg);
2921 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2922 map->cache_type == REGCACHE_NONE) {
2926 if (!map->cache_bypass && map->cache_only) {
2927 ret = -EBUSY;
2931 if (!map->read) {
2932 ret = -ENOTSUPP;
2936 if (map->use_single_read)
2938 else if (map->max_raw_read && val_len > map->max_raw_read)
2939 chunk_regs = map->max_raw_read / val_bytes;
2952 val_len -= chunk_bytes;
2971 map->format.format_val(val + (i * val_bytes), v, 0);
2976 map->unlock(map->lock_arg);
2992 * range of registers. Some devices have certain registers for which a read
3009 if (!map->read)
3010 return -ENOTSUPP;
3012 if (val_len % map->format.val_bytes)
3013 return -EINVAL;
3014 if (!IS_ALIGNED(reg, map->reg_stride))
3015 return -EINVAL;
3017 return -EINVAL;
3019 map->lock(map->lock_arg);
3022 ret = -EINVAL;
3032 if (!map->cache_bypass && map->cache_only) {
3033 ret = -EBUSY;
3038 if (map->bus->reg_noinc_read) {
3044 if (map->max_raw_read && map->max_raw_read < val_len)
3045 read_len = map->max_raw_read;
3052 val_len -= read_len;
3056 map->unlock(map->lock_arg);
3074 ret = regmap_read(field->regmap, field->reg, &reg_val);
3078 reg_val &= field->mask;
3079 reg_val >>= field->shift;
3087 * regmap_fields_read() - Read a value from a single register field with port ID
3102 if (id >= field->id_size)
3103 return -EINVAL;
3105 ret = regmap_read(field->regmap,
3106 field->reg + (field->id_offset * id),
3111 reg_val &= field->mask;
3112 reg_val >>= field->shift;
3127 map->lock(map->lock_arg);
3133 if (!IS_ALIGNED(regs[i], map->reg_stride)) {
3134 ret = -EINVAL;
3144 switch (map->format.val_bytes) {
3155 ret = -EINVAL;
3160 map->unlock(map->lock_arg);
3165 * regmap_bulk_read() - Read multiple sequential registers from the device
3179 size_t val_bytes = map->format.val_bytes;
3182 if (!IS_ALIGNED(reg, map->reg_stride))
3183 return -EINVAL;
3185 return -EINVAL;
3187 if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
3193 map->format.parse_inplace(val + i);
3204 * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
3218 return -EINVAL;
3234 if (regmap_volatile(map, reg) && map->reg_update_bits) {
3236 ret = map->reg_update_bits(map->bus_context, reg, mask, val);
3247 if (force_write || (tmp != orig) || map->force_write_field) {
3258 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
3285 map->lock(map->lock_arg);
3287 map->async = async;
3291 map->async = false;
3293 map->unlock(map->lock_arg);
3300 * regmap_test_bits() - Check if all specified bits are set in a register.
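A minimal sketch of the two common bit helpers built on the update path above; the register and bits are placeholders:

/* set bit 3, leaving the other bits untouched (read/modify/write) */
ret = regmap_update_bits(map, 0x04, BIT(3), BIT(3));

/* 1 if all given bits are set, 0 if any is clear, negative on I/O error */
ret = regmap_test_bits(map, 0x04, BIT(3));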
3324 struct regmap *map = async->map;
3329 spin_lock(&map->async_lock);
3330 list_move(&async->list, &map->async_free);
3331 wake = list_empty(&map->async_list);
3334 map->async_ret = ret;
3336 spin_unlock(&map->async_lock);
3339 wake_up(&map->async_waitq);
3348 spin_lock_irqsave(&map->async_lock, flags);
3349 ret = list_empty(&map->async_list);
3350 spin_unlock_irqrestore(&map->async_lock, flags);
3356 * regmap_async_complete - Ensure all asynchronous I/O has completed.
3369 if (!map->bus || !map->bus->async_write)
3374 wait_event(map->async_waitq, regmap_async_is_done(map));
3376 spin_lock_irqsave(&map->async_lock, flags);
3377 ret = map->async_ret;
3378 map->async_ret = 0;
3379 spin_unlock_irqrestore(&map->async_lock, flags);
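A minimal sketch pairing an asynchronous write with the completion barrier above; the register is a placeholder:

int ret = regmap_write_async(map, 0x10, 0x01);

if (ret)
	return ret;
/* ... schedule more writes ... */
ret = regmap_async_complete(map);	/* wait for everything queued so far */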
3388 * regmap_register_patch - Register and apply register updates to be applied
3415 p = krealloc(map->patch,
3416 sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
3419 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
3420 map->patch = p;
3421 map->patch_regs += num_regs;
3423 return -ENOMEM;
3426 map->lock(map->lock_arg);
3428 bypass = map->cache_bypass;
3430 map->cache_bypass = true;
3431 map->async = true;
3435 map->async = false;
3436 map->cache_bypass = bypass;
3438 map->unlock(map->lock_arg);
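A minimal sketch of registering an errata patch; registers and values are placeholders:

static const struct reg_sequence foo_errata[] = {
	{ 0x3c, 0x01 },
	{ 0x3d, 0x80 },
};

int ret = regmap_register_patch(map, foo_errata, ARRAY_SIZE(foo_errata));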
3447 * regmap_get_val_bytes() - Report the size of a register value
3456 if (map->format.format_write)
3457 return -EINVAL;
3459 return map->format.val_bytes;
3464 * regmap_get_max_register() - Report the max register value
3473 return map->max_register_is_set ? map->max_register : -EINVAL;
3478 * regmap_get_reg_stride() - Report the register address stride
3487 return map->reg_stride;
3492 * regmap_might_sleep() - Returns whether a regmap access might sleep.
3500 return map->can_sleep;
3507 if (!map->format.parse_val)
3508 return -EINVAL;
3510 *val = map->format.parse_val(buf);