Lines matching refs: nvm
82 static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region)
84 iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);
87 static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm)
89 void __iomem *base = nvm->base;
100 static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address)
102 void __iomem *base = nvm->base;
109 static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address)
111 void __iomem *base = nvm->base;
118 static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data)
120 void __iomem *base = nvm->base;
127 static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data)
129 void __iomem *base = nvm->base;
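
The helpers above (lines 82-129) are thin indirect-MMIO accessors over the BAR mapped at nvm->base: one register selects the flash region, the others move 32/64-bit quantities at a given flash address. A minimal sketch of the 32-bit pair under that assumption; only NVM_REGION_ID_REG appears in the listing, the other register offsets are hypothetical placeholders:

    /* Sketch only: register offsets other than NVM_REGION_ID_REG are
     * hypothetical placeholders for whatever the real driver defines.
     */
    static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region)
    {
        iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);
    }

    static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address)
    {
        void __iomem *base = nvm->base;

        iowrite32(address, base + NVM_ADDR_REG);   /* hypothetical offset */
        return ioread32(base + NVM_DATA_REG);      /* hypothetical offset */
    }

    static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data)
    {
        void __iomem *base = nvm->base;

        iowrite32(address, base + NVM_ADDR_REG);
        iowrite32(data, base + NVM_DATA_REG);
    }
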
136 static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map)
143 idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);
145 flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG);
146 if (idg_nvm_error(nvm))
152 fmstr4 = idg_nvm_read32(nvm, fmstr4_addr);
153 if (idg_nvm_error(nvm))
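
idg_nvm_get_access_map() (lines 136-153) switches to the flash descriptor region and pulls the per-region read/write permissions out of the descriptor's flash-master section. A rough reconstruction from the lines above; the FMBA/FMSTR4 offset arithmetic, the error codes, and the final assignment are assumptions:

    static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map)
    {
        u32 flmap1, fmba, fmstr4_addr, fmstr4;

        idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

        flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG);
        if (idg_nvm_error(nvm))
            return -EIO;

        /* Assumption: FMBA (flash master base address) sits in the low byte
         * of FLMAP1, in 16-byte units; FMSTR4 is at a fixed offset inside
         * that section.
         */
        fmba = FIELD_GET(GENMASK(7, 0), flmap1) << 4;
        fmstr4_addr = fmba + 0x0c;   /* offset of FMSTR4: assumption */

        fmstr4 = idg_nvm_read32(nvm, fmstr4_addr);
        if (idg_nvm_error(nvm))
            return -EIO;

        *access_map = fmstr4;   /* consumed by idg_nvm_region_readable/_writable() */
        return 0;
    }
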
184 static int idg_nvm_is_valid(struct intel_dg_nvm *nvm)
188 idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);
190 is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG);
191 if (idg_nvm_error(nvm))
200 static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from)
204 for (i = 0; i < nvm->nregions; i++) {
205 if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from &&
206 nvm->regions[i].offset <= from &&
207 nvm->regions[i].size != 0)
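
idg_nvm_get_region() (lines 200-207) maps an absolute flash offset to the index of the region containing it; the callers below treat an index equal to nvm->nregions as "no such region". Read as a whole, the loop is simply:

    static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from)
    {
        unsigned int i;

        for (i = 0; i < nvm->nregions; i++) {
            if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from &&
                nvm->regions[i].offset <= from &&
                nvm->regions[i].size != 0)
                break;
        }

        /* i == nvm->nregions means the offset fell into a hole between regions */
        return i;
    }
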
214 static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to,
217 u32 data = idg_nvm_read32(nvm, to);
219 if (idg_nvm_error(nvm))
224 idg_nvm_write32(nvm, to, data);
225 if (idg_nvm_error(nvm))
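
idg_nvm_rewrite_partial() (lines 214-225) implements sub-dword writes as a read-modify-write of the enclosing 32-bit word, so only the requested bytes change. A sketch; the parameter meaning (dword-aligned destination, byte offset, byte count) is inferred from the call sites at lines 247 and 297:

    static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to,
                                           loff_t offset, size_t len, const u32 *newdata)
    {
        u32 data = idg_nvm_read32(nvm, to);

        if (idg_nvm_error(nvm))
            return -EIO;

        /* Patch only 'len' bytes starting at byte 'offset' of the dword. */
        memcpy((u8 *)&data + offset, newdata, len);

        idg_nvm_write32(nvm, to, data);
        if (idg_nvm_error(nvm))
            return -EIO;

        return len;
    }
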
231 static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region,
242 idg_nvm_set_region_id(nvm, region);
247 ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]);
266 idg_nvm_write32(nvm, to, data);
267 if (idg_nvm_error(nvm))
279 idg_nvm_write64(nvm, to + i, data);
280 if (idg_nvm_error(nvm))
289 idg_nvm_write32(nvm, to + i, data);
290 if (idg_nvm_error(nvm))
297 ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]);
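
idg_write() (lines 231-297) splits the buffer into an unaligned head (handled by the partial-rewrite helper), a 64-bit-wide body, an optional trailing 32-bit word, and a partial tail. A condensed sketch of that alignment dance, assuming this bookkeeping; the driver's exact variable handling (and any extra hardware workarounds) may differ:

    static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region,
                             loff_t to, size_t len, const unsigned char *buf)
    {
        size_t len_left = len;
        size_t len8, len4, i;

        idg_nvm_set_region_id(nvm, region);

        /* Unaligned head: patch the enclosing dword via read-modify-write. */
        if (!IS_ALIGNED(to, sizeof(u32))) {
            loff_t to4 = ALIGN_DOWN(to, sizeof(u32));
            size_t head = min_t(size_t, sizeof(u32) - (to - to4), len_left);
            ssize_t ret;

            ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, head, (const u32 *)buf);
            if (ret < 0)
                return ret;
            buf += head;
            to += head;
            len_left -= head;
        }

        /* Aligned body: as many 64-bit writes as possible... */
        len8 = ALIGN_DOWN(len_left, sizeof(u64));
        for (i = 0; i < len8; i += sizeof(u64)) {
            u64 data;

            memcpy(&data, &buf[i], sizeof(u64));
            idg_nvm_write64(nvm, to + i, data);
            if (idg_nvm_error(nvm))
                return -EIO;
        }

        /* ...then one 32-bit write if at least a dword remains... */
        len4 = len_left - len8;
        if (len4 >= sizeof(u32)) {
            u32 data;

            memcpy(&data, &buf[i], sizeof(u32));
            idg_nvm_write32(nvm, to + i, data);
            if (idg_nvm_error(nvm))
                return -EIO;
            i += sizeof(u32);
            len4 -= sizeof(u32);
        }

        /* ...and a final partial dword for the tail. */
        if (len4) {
            ssize_t ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4,
                                                  (const u32 *)&buf[i]);
            if (ret < 0)
                return ret;
        }

        return len;
    }
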
305 static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region,
315 idg_nvm_set_region_id(nvm, region);
321 u32 data = idg_nvm_read32(nvm, from4);
323 if (idg_nvm_error(nvm))
338 u32 data = idg_nvm_read32(nvm, from);
340 if (idg_nvm_error(nvm))
350 u64 data = idg_nvm_read64(nvm, from + i);
352 if (idg_nvm_error(nvm))
360 u32 data = idg_nvm_read32(nvm, from + i);
362 if (idg_nvm_error(nvm))
370 u32 data = idg_nvm_read32(nvm, from + i);
372 if (idg_nvm_error(nvm))
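
idg_read() (lines 305-372) mirrors the write path but needs no helper for the unaligned head: it fetches the enclosing dword and copies out only the requested bytes. That first step looks roughly like the fragment below (an illustrative reading of lines 315-323):

    /* Unaligned head of idg_read(): 'from' is not dword-aligned. */
    if (!IS_ALIGNED(from, sizeof(u32))) {
        loff_t from4 = ALIGN_DOWN(from, sizeof(u32));
        size_t head = min_t(size_t, sizeof(u32) - (from - from4), len);
        u32 data = idg_nvm_read32(nvm, from4);

        if (idg_nvm_error(nvm))
            return -EIO;

        /* Copy only the bytes the caller asked for out of the fetched dword. */
        memcpy(buf, (u8 *)&data + (from - from4), head);
        buf += head;
        from += head;
        len -= head;
    }
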
381 idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr)
383 void __iomem *base2 = nvm->base2;
384 void __iomem *base = nvm->base;
393 if (nvm->non_posted_erase) {
417 static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device,
425 idg_nvm_error(nvm);
427 ret = idg_nvm_is_valid(nvm);
433 if (idg_nvm_get_access_map(nvm, &access_map))
436 for (i = 0, n = 0; i < nvm->nregions; i++) {
438 u8 id = nvm->regions[i].id;
441 region = idg_nvm_read32(nvm, address);
448 id, nvm->regions[i].name, region, base, limit);
452 id, nvm->regions[i].name);
453 nvm->regions[i].is_readable = 0;
457 if (nvm->size < limit)
458 nvm->size = limit;
460 nvm->regions[i].offset = base;
461 nvm->regions[i].size = limit - base + 1;
463 nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);
465 nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);
467 nvm->regions[i].name,
468 nvm->regions[i].id,
469 nvm->regions[i].offset,
470 nvm->regions[i].size,
471 nvm->regions[i].is_readable,
472 nvm->regions[i].is_writable);
474 if (nvm->regions[i].is_readable)
478 nvm->non_posted_erase = non_posted_erase;
481 dev_dbg(device, "Non-posted erase %d\n", nvm->non_posted_erase);
486 nvm->size += 1;
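
intel_dg_nvm_init() (lines 417-486) clears any stale error, checks the descriptor signature, fetches the access map, and then turns each region's base/limit register into an offset and size, marking regions the hardware hides as unreadable. The per-region conversion looks roughly like the sketch below; the register address macro, the disable check, and the bitfield layout (4K-granular base and limit fields) are assumptions:

    for (i = 0, n = 0; i < nvm->nregions; i++) {
        u8 id = nvm->regions[i].id;
        u32 address = NVM_FLREG(id);   /* per-region register: hypothetical macro */
        u32 region, base, limit;

        region = idg_nvm_read32(nvm, address);

        /* Assumption: 4K-granular base in the low half, limit in the high half. */
        base = (region & 0x7fff) << 12;
        limit = (((region >> 16) & 0x7fff) << 12) | 0xfff;

        if (base > limit) {            /* region disabled in the descriptor (assumption) */
            nvm->regions[i].is_readable = 0;
            continue;
        }

        if (nvm->size < limit)
            nvm->size = limit;

        nvm->regions[i].offset = base;
        nvm->regions[i].size = limit - base + 1;
        nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);
        nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);

        if (nvm->regions[i].is_readable)
            n++;                       /* readable-region count handed back to the caller */
    }

    nvm->size += 1;                    /* size = highest limit + 1 */
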
493 struct intel_dg_nvm *nvm = mtd->priv;
502 if (WARN_ON(!nvm))
515 guard(mutex)(&nvm->lock);
524 idx = idg_nvm_get_region(nvm, addr);
525 if (idx >= nvm->nregions) {
531 from = addr - nvm->regions[idx].offset;
532 region = nvm->regions[idx].id;
534 if (len > nvm->regions[idx].size - from)
535 len = nvm->regions[idx].size - from;
538 region, nvm->regions[idx].name, from, len);
540 bytes = idg_erase(nvm, region, from, len, &info->fail_addr);
543 info->fail_addr += nvm->regions[idx].offset;
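
intel_dg_mtd_erase() (lines 493-543) is the mtd_info->_erase hook: under nvm->lock it translates the absolute MTD address into a (region id, relative offset) pair, clamps the length to the region, and converts the region-relative fail address reported by idg_erase() back into an absolute one. A sketch condensed to a single region; any alignment checks or chunking the driver does between lines 502 and 524 are omitted, and the error codes are assumptions:

    static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
    {
        struct intel_dg_nvm *nvm = mtd->priv;
        loff_t addr = info->addr;
        u64 len = info->len;
        unsigned int idx;
        ssize_t bytes;
        loff_t from;
        u8 region;

        if (WARN_ON(!nvm))
            return -EINVAL;

        guard(mutex)(&nvm->lock);

        idx = idg_nvm_get_region(nvm, addr);
        if (idx >= nvm->nregions)
            return -ERANGE;

        from = addr - nvm->regions[idx].offset;   /* rebase to region-relative */
        region = nvm->regions[idx].id;
        if (len > nvm->regions[idx].size - from)
            len = nvm->regions[idx].size - from;

        bytes = idg_erase(nvm, region, from, len, &info->fail_addr);
        if (bytes < 0) {
            /* idg_erase() reports the fail address region-relative. */
            info->fail_addr += nvm->regions[idx].offset;
            return bytes;
        }

        return 0;
    }
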
557 struct intel_dg_nvm *nvm = mtd->priv;
562 if (WARN_ON(!nvm))
565 idx = idg_nvm_get_region(nvm, from);
568 nvm->regions[idx].id, nvm->regions[idx].name, from, len);
570 if (idx >= nvm->nregions) {
575 from -= nvm->regions[idx].offset;
576 region = nvm->regions[idx].id;
577 if (len > nvm->regions[idx].size - from)
578 len = nvm->regions[idx].size - from;
580 guard(mutex)(&nvm->lock);
582 ret = idg_read(nvm, region, from, len, buf);
596 struct intel_dg_nvm *nvm = mtd->priv;
601 if (WARN_ON(!nvm))
604 idx = idg_nvm_get_region(nvm, to);
607 nvm->regions[idx].id, nvm->regions[idx].name, to, len);
609 if (idx >= nvm->nregions) {
614 to -= nvm->regions[idx].offset;
615 region = nvm->regions[idx].id;
616 if (len > nvm->regions[idx].size - to)
617 len = nvm->regions[idx].size - to;
619 guard(mutex)(&nvm->lock);
621 ret = idg_write(nvm, region, to, len, buf);
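
intel_dg_mtd_read() and intel_dg_mtd_write() (lines 557-621) share the same shape: resolve the region, rebase the offset, clamp the length, then call idg_read()/idg_write() under nvm->lock and report the byte count through *retlen. The read side as a condensed sketch (the write side only swaps the final call); error codes are assumptions:

    static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                                 size_t *retlen, u_char *buf)
    {
        struct intel_dg_nvm *nvm = mtd->priv;
        unsigned int idx;
        ssize_t ret;
        u8 region;

        if (WARN_ON(!nvm))
            return -EINVAL;

        idx = idg_nvm_get_region(nvm, from);
        if (idx >= nvm->nregions)
            return -ERANGE;

        from -= nvm->regions[idx].offset;
        region = nvm->regions[idx].id;
        if (len > nvm->regions[idx].size - from)
            len = nvm->regions[idx].size - from;

        guard(mutex)(&nvm->lock);

        ret = idg_read(nvm, region, from, len, buf);
        if (ret < 0) {
            *retlen = 0;
            return ret;
        }

        *retlen = ret;
        return 0;
    }
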
634 struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt);
637 pr_debug("freeing intel_dg nvm\n");
638 for (i = 0; i < nvm->nregions; i++)
639 kfree(nvm->regions[i].name);
640 mutex_destroy(&nvm->lock);
641 kfree(nvm);
647 struct intel_dg_nvm *nvm = master->priv;
649 if (WARN_ON(!nvm))
651 pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
652 kref_get(&nvm->refcnt);
660 struct intel_dg_nvm *nvm = master->priv;
662 if (WARN_ON(!nvm))
664 pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
665 kref_put(&nvm->refcnt, intel_dg_nvm_release);
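
Lines 634-665 tie the object lifetime to MTD users: _get_device/_put_device bump a kref embedded in struct intel_dg_nvm, and the release callback frees the region names, the mutex, and the object itself, so the state stays valid while an mtd device is held open even after the auxiliary device goes away. Roughly (the master lookup via mtd_get_master() is inferred from the 'master->priv' access at lines 647/660):

    static void intel_dg_nvm_release(struct kref *kref)
    {
        struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt);
        unsigned int i;

        pr_debug("freeing intel_dg nvm\n");
        for (i = 0; i < nvm->nregions; i++)
            kfree(nvm->regions[i].name);
        mutex_destroy(&nvm->lock);
        kfree(nvm);
    }

    static int intel_dg_mtd_get_device(struct mtd_info *mtd)
    {
        struct mtd_info *master = mtd_get_master(mtd);
        struct intel_dg_nvm *nvm = master->priv;

        if (WARN_ON(!nvm))
            return -EINVAL;
        pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
        kref_get(&nvm->refcnt);

        return 0;
    }

    static void intel_dg_mtd_put_device(struct mtd_info *mtd)
    {
        struct mtd_info *master = mtd_get_master(mtd);
        struct intel_dg_nvm *nvm = master->priv;

        if (WARN_ON(!nvm))
            return;
        pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
        kref_put(&nvm->refcnt, intel_dg_nvm_release);
    }
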
668 static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device,
677 nvm->mtd.owner = THIS_MODULE;
678 nvm->mtd.dev.parent = device;
679 nvm->mtd.flags = MTD_CAP_NORFLASH;
680 nvm->mtd.type = MTD_DATAFLASH;
681 nvm->mtd.priv = nvm;
682 nvm->mtd._write = intel_dg_mtd_write;
683 nvm->mtd._read = intel_dg_mtd_read;
684 nvm->mtd._erase = intel_dg_mtd_erase;
685 nvm->mtd._get_device = intel_dg_mtd_get_device;
686 nvm->mtd._put_device = intel_dg_mtd_put_device;
687 nvm->mtd.writesize = SZ_1; /* 1-byte granularity */
688 nvm->mtd.erasesize = SZ_4K; /* 4K-byte granularity */
689 nvm->mtd.size = nvm->size;
691 parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL);
695 for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) {
696 if (!nvm->regions[i].is_readable)
698 parts[n].name = nvm->regions[i].name;
699 parts[n].offset = nvm->regions[i].offset;
700 parts[n].size = nvm->regions[i].size;
701 if (!nvm->regions[i].is_writable && !writable_override)
706 ret = mtd_device_register(&nvm->mtd, parts, n);
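
intel_dg_nvm_init_mtd() (lines 668-706) describes the whole flash as one byte-writable, 4K-erasable MTD master and registers one partition per readable region, masking MTD_WRITEABLE when the region is read-only and no override was requested. A sketch of the body; 'nparts' and 'writable_override' are the remaining parameters inferred from lines 695 and 701, and the mask_flags assignment is an assumption:

    struct mtd_partition *parts;
    unsigned int i, n;
    int ret;

    nvm->mtd.owner = THIS_MODULE;
    nvm->mtd.dev.parent = device;
    nvm->mtd.flags = MTD_CAP_NORFLASH;
    nvm->mtd.type = MTD_DATAFLASH;
    nvm->mtd.priv = nvm;
    nvm->mtd._write = intel_dg_mtd_write;
    nvm->mtd._read = intel_dg_mtd_read;
    nvm->mtd._erase = intel_dg_mtd_erase;
    nvm->mtd._get_device = intel_dg_mtd_get_device;
    nvm->mtd._put_device = intel_dg_mtd_put_device;
    nvm->mtd.writesize = SZ_1;     /* byte-granular writes */
    nvm->mtd.erasesize = SZ_4K;    /* 4K erase blocks */
    nvm->mtd.size = nvm->size;

    parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL);
    if (!parts)
        return -ENOMEM;

    for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) {
        if (!nvm->regions[i].is_readable)
            continue;
        parts[n].name = nvm->regions[i].name;
        parts[n].offset = nvm->regions[i].offset;
        parts[n].size = nvm->regions[i].size;
        if (!nvm->regions[i].is_writable && !writable_override)
            parts[n].mask_flags = MTD_WRITEABLE;   /* expose as read-only */
        n++;
    }

    ret = mtd_device_register(&nvm->mtd, parts, n);
    kfree(parts);
    return ret;
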
716 struct intel_dg_nvm *nvm;
735 nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL);
736 if (!nvm)
739 kref_init(&nvm->refcnt);
740 mutex_init(&nvm->lock);
750 nvm->regions[n].name = name;
751 nvm->regions[n].id = i;
754 nvm->nregions = n; /* in case kasprintf fails */
756 nvm->base = devm_ioremap_resource(device, &invm->bar);
757 if (IS_ERR(nvm->base)) {
758 ret = PTR_ERR(nvm->base);
763 nvm->base2 = devm_ioremap_resource(device, &invm->bar2);
764 if (IS_ERR(nvm->base2)) {
765 ret = PTR_ERR(nvm->base2);
770 ret = intel_dg_nvm_init(nvm, device, invm->non_posted_erase);
772 dev_err(device, "cannot initialize nvm %d\n", ret);
776 ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override);
782 dev_set_drvdata(&aux_dev->dev, nvm);
787 kref_put(&nvm->refcnt, intel_dg_nvm_release);
793 struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev);
795 if (!nvm)
798 mtd_device_unregister(&nvm->mtd);
802 kref_put(&nvm->refcnt, intel_dg_nvm_release);
807 .name = "i915.nvm",
810 .name = "xe.nvm",