xref: /linux/drivers/mtd/devices/mtd_intel_dg.c (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/bits.h>
8 #include <linux/cleanup.h>
9 #include <linux/delay.h>
10 #include <linux/device.h>
11 #include <linux/intel_dg_nvm_aux.h>
12 #include <linux/io.h>
13 #include <linux/io-64-nonatomic-lo-hi.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/partitions.h>
18 #include <linux/string.h>
19 #include <linux/slab.h>
20 #include <linux/sizes.h>
21 #include <linux/types.h>
22 
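/*
 * struct intel_dg_nvm - runtime state of a discrete graphics NVM device
 * @refcnt: reference count shared between the driver and MTD users
 * @mtd: MTD device registered on top of this NVM
 * @lock: serializes region accesses (read, write, erase)
 * @base: register window for triggered NVM accesses
 * @base2: second window, used to poll non-posted erase completion
 * @non_posted_erase: erase completion must be polled via @base2
 * @size: total addressable size derived from the region limits
 * @nregions: number of entries in @regions
 * @regions: per-region name, id, offset, size and access rights
 */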
23 struct intel_dg_nvm {
24 	struct kref refcnt;
25 	struct mtd_info mtd;
26 	struct mutex lock; /* region access lock */
27 	void __iomem *base;
28 	void __iomem *base2;
29 	bool non_posted_erase;
30 
31 	size_t size;
32 	unsigned int nregions;
33 	struct {
34 		const char *name;
35 		u8 id;
36 		u64 offset;
37 		u64 size;
38 		unsigned int is_readable:1;
39 		unsigned int is_writable:1;
40 	} regions[] __counted_by(nregions);
41 };
42 
43 #define NVM_TRIGGER_REG       0x00000000
44 #define NVM_VALSIG_REG        0x00000010
45 #define NVM_ADDRESS_REG       0x00000040
46 #define NVM_REGION_ID_REG     0x00000044
47 #define NVM_DEBUG_REG         0x00000000
48 /*
49  * [15:0]  - Erase size: 0x0010 = 4K, 0x0080 = 32K, 0x0100 = 64K
50  * [23:16] - Reserved
51  * [31:24] - Erase MEM RegionID
52  */
53 #define NVM_ERASE_REG         0x00000048
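/*
 * Example (illustrative): a 4K erase of region 2 at offset 0x1000 is issued
 * by writing 0x1000 to NVM_ADDRESS_REG and then (2 << 24) | 0x0010 to
 * NVM_ERASE_REG (see idg_erase()).
 */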
54 #define NVM_ACCESS_ERROR_REG  0x00000070
55 #define NVM_ADDRESS_ERROR_REG 0x00000074
56 
57 /* Flash Valid Signature */
58 #define NVM_FLVALSIG          0x0FF0A55A
59 
60 #define NVM_MAP_ADDR_MASK     GENMASK(7, 0)
61 #define NVM_MAP_ADDR_SHIFT    0x00000004
62 
63 #define NVM_REGION_ID_DESCRIPTOR  0
64 /* Flash Region Base Address */
65 #define NVM_FRBA      0x40
66 /* Flash Region __n - Flash Descriptor Record */
67 #define NVM_FLREG(__n) (NVM_FRBA + ((__n) * 4))
68 /*  Flash Map 1 Register */
69 #define NVM_FLMAP1_REG  0x18
70 #define NVM_FLMSTR4_OFFSET 0x00C
71 
72 #define NVM_ACCESS_ERROR_PCIE_MASK 0x7
73 
74 #define NVM_FREG_BASE_MASK GENMASK(15, 0)
75 #define NVM_FREG_ADDR_MASK GENMASK(31, 16)
76 #define NVM_FREG_ADDR_SHIFT 12
77 #define NVM_FREG_MIN_REGION_SIZE 0xFFF
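/*
 * Example (illustrative): an FLREG value of 0x07FF0500 decodes to
 * base = 0x0500 << 12 = 0x500000 and limit = (0x07FF << 12) | 0xFFF = 0x7FFFFF,
 * i.e. a 3 MiB region (see intel_dg_nvm_init()).
 */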
78 
79 #define NVM_NON_POSTED_ERASE_DONE BIT(23)
80 #define NVM_NON_POSTED_ERASE_DONE_ITER 3000
81 
82 static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region)
83 {
84 	iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);
85 }
86 
87 static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm)
88 {
89 	void __iomem *base = nvm->base;
90 
91 	u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK;
92 
93 	/* reset error bits */
94 	if (reg)
95 		iowrite32(reg, base + NVM_ACCESS_ERROR_REG);
96 
97 	return reg;
98 }
99 
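/*
 * All flash accesses are indirect: the target offset is programmed into
 * NVM_ADDRESS_REG and the data is then transferred through a read or write
 * of NVM_TRIGGER_REG, 32 or 64 bits at a time.
 */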
100 static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address)
101 {
102 	void __iomem *base = nvm->base;
103 
104 	iowrite32(address, base + NVM_ADDRESS_REG);
105 
106 	return ioread32(base + NVM_TRIGGER_REG);
107 }
108 
109 static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address)
110 {
111 	void __iomem *base = nvm->base;
112 
113 	iowrite32(address, base + NVM_ADDRESS_REG);
114 
115 	return readq(base + NVM_TRIGGER_REG);
116 }
117 
118 static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data)
119 {
120 	void __iomem *base = nvm->base;
121 
122 	iowrite32(address, base + NVM_ADDRESS_REG);
123 
124 	iowrite32(data, base + NVM_TRIGGER_REG);
125 }
126 
127 static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data)
128 {
129 	void __iomem *base = nvm->base;
130 
131 	iowrite32(address, base + NVM_ADDRESS_REG);
132 
133 	writeq(data, base + NVM_TRIGGER_REG);
134 }
135 
136 static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map)
137 {
138 	u32 fmstr4_addr;
139 	u32 fmstr4;
140 	u32 flmap1;
141 	u32 fmba;
142 
143 	idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);
144 
145 	flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG);
146 	if (idg_nvm_error(nvm))
147 		return -EIO;
148 	/* Get Flash Master Base Address (FMBA) */
149 	fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT);
150 	fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET;
151 
152 	fmstr4 = idg_nvm_read32(nvm, fmstr4_addr);
153 	if (idg_nvm_error(nvm))
154 		return -EIO;
155 
156 	*access_map = fmstr4;
157 	return 0;
158 }
159 
160 /*
161  * Region read/write access encoded in the access map
162  * in the following order from the lower bit:
163  * [3:0] regions 12-15 read state
164  * [7:4] regions 12-15 write state
165  * [19:8] regions 0-11 read state
166  * [31:20] regions 0-11 write state
167  */
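/*
 * Example (illustrative): access_map = 0x00C00C00 means regions 2 and 3 are
 * readable (bits 10 and 11) and writable (bits 22 and 23).
 */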
168 static bool idg_nvm_region_readable(u32 access_map, u8 region)
169 {
170 	if (region < 12)
171 		return access_map & BIT(region + 8); /* [19:8] */
172 	else
173 		return access_map & BIT(region - 12); /* [3:0] */
174 }
175 
176 static bool idg_nvm_region_writable(u32 access_map, u8 region)
177 {
178 	if (region < 12)
179 		return access_map & BIT(region + 20); /* [31:20] */
180 	else
181 		return access_map & BIT(region - 8); /* [7:4] */
182 }
183 
184 static int idg_nvm_is_valid(struct intel_dg_nvm *nvm)
185 {
186 	u32 is_valid;
187 
188 	idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);
189 
190 	is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG);
191 	if (idg_nvm_error(nvm))
192 		return -EIO;
193 
194 	if (is_valid != NVM_FLVALSIG)
195 		return -ENODEV;
196 
197 	return 0;
198 }
199 
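/*
 * Find the enabled region containing @from. Returns the region index, or
 * nvm->nregions if no region covers the address; callers must range-check
 * the result.
 */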
200 static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from)
201 {
202 	unsigned int i;
203 
204 	for (i = 0; i < nvm->nregions; i++) {
205 		if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from &&
206 		    nvm->regions[i].offset <= from &&
207 		    nvm->regions[i].size != 0)
208 			break;
209 	}
210 
211 	return i;
212 }
213 
214 static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to,
215 				       loff_t offset, size_t len, const u32 *newdata)
216 {
217 	u32 data = idg_nvm_read32(nvm, to);
218 
219 	if (idg_nvm_error(nvm))
220 		return -EIO;
221 
222 	memcpy((u8 *)&data + offset, newdata, len);
223 
224 	idg_nvm_write32(nvm, to, data);
225 	if (idg_nvm_error(nvm))
226 		return -EIO;
227 
228 	return len;
229 }
230 
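/*
 * Write @len bytes to @region at offset @to. Unaligned head and tail bytes
 * are handled with a read-modify-write of a single dword, an extra dword
 * write keeps 64-bit accesses from straddling a 1K boundary, and the aligned
 * middle is written 64 bits at a time.
 */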
231 static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region,
232 			 loff_t to, size_t len, const unsigned char *buf)
233 {
234 	size_t len_s = len;
235 	size_t to_shift;
236 	size_t len8;
237 	size_t len4;
238 	ssize_t ret;
239 	size_t to4;
240 	size_t i;
241 
242 	idg_nvm_set_region_id(nvm, region);
243 
244 	to4 = ALIGN_DOWN(to, sizeof(u32));
245 	to_shift = min(sizeof(u32) - ((size_t)to - to4), len);
246 	if (to - to4) {
247 		ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]);
248 		if (ret < 0)
249 			return ret;
250 
251 		buf += to_shift;
252 		to += to_shift;
253 		len_s -= to_shift;
254 	}
255 
256 	if (!IS_ALIGNED(to, sizeof(u64)) &&
257 	    ((to ^ (to + len_s)) & GENMASK(31, 10))) {
258 		/*
259 		 * Workaround reads/writes across 1k-aligned addresses
260 		 * (start u32 before 1k, end u32 after)
261 		 * as this fails on hardware.
262 		 */
263 		u32 data;
264 
265 		memcpy(&data, &buf[0], sizeof(u32));
266 		idg_nvm_write32(nvm, to, data);
267 		if (idg_nvm_error(nvm))
268 			return -EIO;
269 		buf += sizeof(u32);
270 		to += sizeof(u32);
271 		len_s -= sizeof(u32);
272 	}
273 
274 	len8 = ALIGN_DOWN(len_s, sizeof(u64));
275 	for (i = 0; i < len8; i += sizeof(u64)) {
276 		u64 data;
277 
278 		memcpy(&data, &buf[i], sizeof(u64));
279 		idg_nvm_write64(nvm, to + i, data);
280 		if (idg_nvm_error(nvm))
281 			return -EIO;
282 	}
283 
284 	len4 = len_s - len8;
285 	if (len4 >= sizeof(u32)) {
286 		u32 data;
287 
288 		memcpy(&data, &buf[i], sizeof(u32));
289 		idg_nvm_write32(nvm, to + i, data);
290 		if (idg_nvm_error(nvm))
291 			return -EIO;
292 		i += sizeof(u32);
293 		len4 -= sizeof(u32);
294 	}
295 
296 	if (len4 > 0) {
297 		ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]);
298 		if (ret < 0)
299 			return ret;
300 	}
301 
302 	return len;
303 }
304 
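/*
 * Read @len bytes from @region at offset @from, mirroring the access pattern
 * of idg_write(): partial head/tail dwords, an alignment dword to avoid
 * 64-bit accesses across a 1K boundary, and a 64-bit bulk loop.
 */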
305 static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region,
306 			loff_t from, size_t len, unsigned char *buf)
307 {
308 	size_t len_s = len;
309 	size_t from_shift;
310 	size_t from4;
311 	size_t len8;
312 	size_t len4;
313 	size_t i;
314 
315 	idg_nvm_set_region_id(nvm, region);
316 
317 	from4 = ALIGN_DOWN(from, sizeof(u32));
318 	from_shift = min(sizeof(u32) - ((size_t)from - from4), len);
319 
320 	if (from - from4) {
321 		u32 data = idg_nvm_read32(nvm, from4);
322 
323 		if (idg_nvm_error(nvm))
324 			return -EIO;
325 		memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift);
326 		len_s -= from_shift;
327 		buf += from_shift;
328 		from += from_shift;
329 	}
330 
331 	if (!IS_ALIGNED(from, sizeof(u64)) &&
332 	    ((from ^ (from + len_s)) & GENMASK(31, 10))) {
333 		/*
334 		 * Workaround reads/writes across 1k-aligned addresses
335 		 * (start u32 before 1k, end u32 after)
336 		 * as this fails on hardware.
337 		 */
338 		u32 data = idg_nvm_read32(nvm, from);
339 
340 		if (idg_nvm_error(nvm))
341 			return -EIO;
342 		memcpy(&buf[0], &data, sizeof(data));
343 		len_s -= sizeof(u32);
344 		buf += sizeof(u32);
345 		from += sizeof(u32);
346 	}
347 
348 	len8 = ALIGN_DOWN(len_s, sizeof(u64));
349 	for (i = 0; i < len8; i += sizeof(u64)) {
350 		u64 data = idg_nvm_read64(nvm, from + i);
351 
352 		if (idg_nvm_error(nvm))
353 			return -EIO;
354 
355 		memcpy(&buf[i], &data, sizeof(data));
356 	}
357 
358 	len4 = len_s - len8;
359 	if (len4 >= sizeof(u32)) {
360 		u32 data = idg_nvm_read32(nvm, from + i);
361 
362 		if (idg_nvm_error(nvm))
363 			return -EIO;
364 		memcpy(&buf[i], &data, sizeof(data));
365 		i += sizeof(u32);
366 		len4 -= sizeof(u32);
367 	}
368 
369 	if (len4 > 0) {
370 		u32 data = idg_nvm_read32(nvm, from + i);
371 
372 		if (idg_nvm_error(nvm))
373 			return -EIO;
374 		memcpy(&buf[i], &data, len4);
375 	}
376 
377 	return len;
378 }
379 
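/*
 * Erase @len bytes in 4K blocks. With non-posted erase, the debug register on
 * the second BAR is polled for the erase-done bit (10 ms steps, up to
 * NVM_NON_POSTED_ERASE_DONE_ITER tries) and then cleared; on timeout
 * *fail_addr is set and -ETIME is returned.
 */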
380 static ssize_t
381 idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr)
382 {
383 	void __iomem *base2 = nvm->base2;
384 	void __iomem *base = nvm->base;
385 	const u32 block = 0x10;
386 	u32 iter = 0;
387 	u32 reg;
388 	u64 i;
389 
390 	for (i = 0; i < len; i += SZ_4K) {
391 		iowrite32(from + i, base + NVM_ADDRESS_REG);
392 		iowrite32(region << 24 | block, base + NVM_ERASE_REG);
393 		if (nvm->non_posted_erase) {
394 			/* Wait for Erase Done */
395 			reg = ioread32(base2 + NVM_DEBUG_REG);
396 			while (!(reg & NVM_NON_POSTED_ERASE_DONE) &&
397 			       ++iter < NVM_NON_POSTED_ERASE_DONE_ITER) {
398 				msleep(10);
399 				reg = ioread32(base2 + NVM_DEBUG_REG);
400 			}
401 			if (reg & NVM_NON_POSTED_ERASE_DONE) {
402 				/* Clear Erase Done */
403 				iowrite32(reg, base2 + NVM_DEBUG_REG);
404 			} else {
405 				*fail_addr = from + i;
406 				return -ETIME;
407 			}
408 		}
409 		/* Since the writes go via the sgunit,
410 		 * we cannot do back-to-back erases.
411 		 */
412 		msleep(50);
413 	}
414 	return len;
415 }
416 
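/*
 * Validate the flash descriptor, read the master access map and each region's
 * base/limit record, and fill in nvm->regions. Returns the number of readable
 * regions or a negative error code.
 */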
417 static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device,
418 			     bool non_posted_erase)
419 {
420 	u32 access_map = 0;
421 	unsigned int i, n;
422 	int ret;
423 
424 	/* clear the error register; previous errors are ignored */
425 	idg_nvm_error(nvm);
426 
427 	ret = idg_nvm_is_valid(nvm);
428 	if (ret) {
429 		dev_err(device, "The MEM is not valid %d\n", ret);
430 		return ret;
431 	}
432 
433 	if (idg_nvm_get_access_map(nvm, &access_map))
434 		return -EIO;
435 
436 	for (i = 0, n = 0; i < nvm->nregions; i++) {
437 		u32 address, base, limit, region;
438 		u8 id = nvm->regions[i].id;
439 
440 		address = NVM_FLREG(id);
441 		region = idg_nvm_read32(nvm, address);
442 
443 		base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT;
444 		limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) |
445 			NVM_FREG_MIN_REGION_SIZE;
446 
447 		dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n",
448 			id, nvm->regions[i].name, region, base, limit);
449 
450 		if (base >= limit || (i > 0 && limit == 0)) {
451 			dev_dbg(device, "[%d] %s: disabled\n",
452 				id, nvm->regions[i].name);
453 			nvm->regions[i].is_readable = 0;
454 			continue;
455 		}
456 
457 		if (nvm->size < limit)
458 			nvm->size = limit;
459 
460 		nvm->regions[i].offset = base;
461 		nvm->regions[i].size = limit - base + 1;
462 		/* No write access to descriptor; mask it out */
463 		nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);
464 
465 		nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);
466 		dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n",
467 			nvm->regions[i].name,
468 			nvm->regions[i].id,
469 			nvm->regions[i].offset,
470 			nvm->regions[i].size,
471 			nvm->regions[i].is_readable,
472 			nvm->regions[i].is_writable);
473 
474 		if (nvm->regions[i].is_readable)
475 			n++;
476 	}
477 
478 	nvm->non_posted_erase = non_posted_erase;
479 
480 	dev_dbg(device, "Registered %d regions\n", n);
481 	dev_dbg(device, "Non posted erase %d\n", nvm->non_posted_erase);
482 
483 	/* The limit is the address of the last valid byte,
484 	 * so add 1 to report the full size.
485 	 */
486 	nvm->size += 1;
487 
488 	return n;
489 }
490 
491 static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
492 {
493 	struct intel_dg_nvm *nvm = mtd->priv;
494 	size_t total_len;
495 	unsigned int idx;
496 	ssize_t bytes;
497 	loff_t from;
498 	size_t len;
499 	u8 region;
500 	u64 addr;
501 
502 	if (WARN_ON(!nvm))
503 		return -EINVAL;
504 
505 	if (!IS_ALIGNED(info->addr, SZ_4K) || !IS_ALIGNED(info->len, SZ_4K)) {
506 		dev_err(&mtd->dev, "unaligned erase %llx %llx\n",
507 			info->addr, info->len);
508 		info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
509 		return -EINVAL;
510 	}
511 
512 	total_len = info->len;
513 	addr = info->addr;
514 
515 	guard(mutex)(&nvm->lock);
516 
517 	while (total_len > 0) {
518 		if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
519 			dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
520 			info->fail_addr = addr;
521 			return -ERANGE;
522 		}
523 
524 		idx = idg_nvm_get_region(nvm, addr);
525 		if (idx >= nvm->nregions) {
526 			dev_err(&mtd->dev, "out of range\n");
527 			info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
528 			return -ERANGE;
529 		}
530 
531 		from = addr - nvm->regions[idx].offset;
532 		region = nvm->regions[idx].id;
533 		len = total_len;
534 		if (len > nvm->regions[idx].size - from)
535 			len = nvm->regions[idx].size - from;
536 
537 		dev_dbg(&mtd->dev, "erasing region[%d] %s from %llx len %zx\n",
538 			region, nvm->regions[idx].name, from, len);
539 
540 		bytes = idg_erase(nvm, region, from, len, &info->fail_addr);
541 		if (bytes < 0) {
542 			dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
543 			info->fail_addr += nvm->regions[idx].offset;
544 			return bytes;
545 		}
546 
547 		addr += len;
548 		total_len -= len;
549 	}
550 
551 	return 0;
552 }
553 
554 static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
555 			     size_t *retlen, u_char *buf)
556 {
557 	struct intel_dg_nvm *nvm = mtd->priv;
558 	unsigned int idx;
559 	ssize_t ret;
560 	u8 region;
561 
562 	if (WARN_ON(!nvm))
563 		return -EINVAL;
564 
565 	idx = idg_nvm_get_region(nvm, from);
566 	if (idx >= nvm->nregions) {
567 		dev_err(&mtd->dev, "out of range\n");
568 		return -ERANGE;
569 	}
570 
571 	dev_dbg(&mtd->dev, "reading region[%d] %s from %lld len %zd\n",
572 		nvm->regions[idx].id, nvm->regions[idx].name, from, len);
574 
575 	from -= nvm->regions[idx].offset;
576 	region = nvm->regions[idx].id;
577 	if (len > nvm->regions[idx].size - from)
578 		len = nvm->regions[idx].size - from;
579 
580 	guard(mutex)(&nvm->lock);
581 
582 	ret = idg_read(nvm, region, from, len, buf);
583 	if (ret < 0) {
584 		dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
585 		return ret;
586 	}
587 
588 	*retlen = ret;
589 
590 	return 0;
591 }
592 
593 static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
594 			      size_t *retlen, const u_char *buf)
595 {
596 	struct intel_dg_nvm *nvm = mtd->priv;
597 	unsigned int idx;
598 	ssize_t ret;
599 	u8 region;
600 
601 	if (WARN_ON(!nvm))
602 		return -EINVAL;
603 
604 	idx = idg_nvm_get_region(nvm, to);
605 	if (idx >= nvm->nregions) {
606 		dev_err(&mtd->dev, "out of range\n");
607 		return -ERANGE;
608 	}
609 
610 	dev_dbg(&mtd->dev, "writing region[%d] %s to %lld len %zd\n",
611 		nvm->regions[idx].id, nvm->regions[idx].name, to, len);
613 
614 	to -= nvm->regions[idx].offset;
615 	region = nvm->regions[idx].id;
616 	if (len > nvm->regions[idx].size - to)
617 		len = nvm->regions[idx].size - to;
618 
619 	guard(mutex)(&nvm->lock);
620 
621 	ret = idg_write(nvm, region, to, len, buf);
622 	if (ret < 0) {
623 		dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
624 		return ret;
625 	}
626 
627 	*retlen = ret;
628 
629 	return 0;
630 }
631 
632 static void intel_dg_nvm_release(struct kref *kref)
633 {
634 	struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt);
635 	int i;
636 
637 	pr_debug("freeing intel_dg nvm\n");
638 	for (i = 0; i < nvm->nregions; i++)
639 		kfree(nvm->regions[i].name);
640 	mutex_destroy(&nvm->lock);
641 	kfree(nvm);
642 }
643 
644 static int intel_dg_mtd_get_device(struct mtd_info *mtd)
645 {
646 	struct mtd_info *master = mtd_get_master(mtd);
647 	struct intel_dg_nvm *nvm = master->priv;
648 
649 	if (WARN_ON(!nvm))
650 		return -EINVAL;
651 	pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
652 	kref_get(&nvm->refcnt);
653 
654 	return 0;
655 }
656 
657 static void intel_dg_mtd_put_device(struct mtd_info *mtd)
658 {
659 	struct mtd_info *master = mtd_get_master(mtd);
660 	struct intel_dg_nvm *nvm = master->priv;
661 
662 	if (WARN_ON(!nvm))
663 		return;
664 	pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
665 	kref_put(&nvm->refcnt, intel_dg_nvm_release);
666 }
667 
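/*
 * Register a single MTD device spanning the whole NVM, with one partition per
 * readable region. Regions without write access are registered read-only
 * unless @writable_override is set.
 */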
668 static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device,
669 				 unsigned int nparts, bool writable_override)
670 {
671 	struct mtd_partition *parts = NULL;
672 	unsigned int i, n;
673 	int ret;
674 
675 	dev_dbg(device, "registering with mtd\n");
676 
677 	nvm->mtd.owner = THIS_MODULE;
678 	nvm->mtd.dev.parent = device;
679 	nvm->mtd.flags = MTD_CAP_NORFLASH;
680 	nvm->mtd.type = MTD_DATAFLASH;
681 	nvm->mtd.priv = nvm;
682 	nvm->mtd._write = intel_dg_mtd_write;
683 	nvm->mtd._read = intel_dg_mtd_read;
684 	nvm->mtd._erase = intel_dg_mtd_erase;
685 	nvm->mtd._get_device = intel_dg_mtd_get_device;
686 	nvm->mtd._put_device = intel_dg_mtd_put_device;
687 	nvm->mtd.writesize = SZ_1; /* 1 byte granularity */
688 	nvm->mtd.erasesize = SZ_4K; /* 4K bytes granularity */
689 	nvm->mtd.size = nvm->size;
690 
691 	parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL);
692 	if (!parts)
693 		return -ENOMEM;
694 
695 	for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) {
696 		if (!nvm->regions[i].is_readable)
697 			continue;
698 		parts[n].name = nvm->regions[i].name;
699 		parts[n].offset  = nvm->regions[i].offset;
700 		parts[n].size = nvm->regions[i].size;
701 		if (!nvm->regions[i].is_writable && !writable_override)
702 			parts[n].mask_flags = MTD_WRITEABLE;
703 		n++;
704 	}
705 
706 	ret = mtd_device_register(&nvm->mtd, parts, n);
707 
708 	kfree(parts);
709 	return ret;
710 }
711 
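/*
 * Probe: count the regions advertised by the parent driver, allocate the
 * intel_dg_nvm structure, name each partition
 * "<auxiliary device name>.<region name>", map the register BAR(s), then
 * initialize the regions and register the MTD device.
 */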
712 static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
713 			      const struct auxiliary_device_id *aux_dev_id)
714 {
715 	struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);
716 	struct intel_dg_nvm *nvm;
717 	struct device *device;
718 	unsigned int nregions;
719 	unsigned int i, n;
720 	int ret;
721 
722 	device = &aux_dev->dev;
723 
724 	/* count available regions */
725 	for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
726 		if (invm->regions[i].name)
727 			nregions++;
728 	}
729 
730 	if (!nregions) {
731 		dev_err(device, "no regions defined\n");
732 		return -ENODEV;
733 	}
734 
735 	nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL);
736 	if (!nvm)
737 		return -ENOMEM;
738 
739 	kref_init(&nvm->refcnt);
740 	mutex_init(&nvm->lock);
741 
742 	for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
743 		if (!invm->regions[i].name)
744 			continue;
745 
746 		char *name = kasprintf(GFP_KERNEL, "%s.%s",
747 				       dev_name(&aux_dev->dev), invm->regions[i].name);
748 		if (!name)
749 			continue;
750 		nvm->regions[n].name = name;
751 		nvm->regions[n].id = i;
752 		n++;
753 	}
754 	nvm->nregions = n; /* may be less than the region count if kasprintf() failed */
755 
756 	nvm->base = devm_ioremap_resource(device, &invm->bar);
757 	if (IS_ERR(nvm->base)) {
758 		ret = PTR_ERR(nvm->base);
759 		goto err;
760 	}
761 
762 	if (invm->non_posted_erase) {
763 		nvm->base2 = devm_ioremap_resource(device, &invm->bar2);
764 		if (IS_ERR(nvm->base2)) {
765 			ret = PTR_ERR(nvm->base2);
766 			goto err;
767 		}
768 	}
769 
770 	ret = intel_dg_nvm_init(nvm, device, invm->non_posted_erase);
771 	if (ret < 0) {
772 		dev_err(device, "cannot initialize nvm %d\n", ret);
773 		goto err;
774 	}
775 
776 	ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override);
777 	if (ret) {
778 		dev_err(device, "failed to init mtd %d\n", ret);
779 		goto err;
780 	}
781 
782 	dev_set_drvdata(&aux_dev->dev, nvm);
783 
784 	return 0;
785 
786 err:
787 	kref_put(&nvm->refcnt, intel_dg_nvm_release);
788 	return ret;
789 }
790 
791 static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev)
792 {
793 	struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev);
794 
795 	if (!nvm)
796 		return;
797 
798 	mtd_device_unregister(&nvm->mtd);
799 
800 	dev_set_drvdata(&aux_dev->dev, NULL);
801 
802 	kref_put(&nvm->refcnt, intel_dg_nvm_release);
803 }
804 
805 static const struct auxiliary_device_id intel_dg_mtd_id_table[] = {
806 	{
807 		.name = "i915.nvm",
808 	},
809 	{
810 		.name = "xe.nvm",
811 	},
812 	{
813 		/* sentinel */
814 	}
815 };
816 MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table);
817 
818 static struct auxiliary_driver intel_dg_mtd_driver = {
819 	.probe  = intel_dg_mtd_probe,
820 	.remove = intel_dg_mtd_remove,
821 	.driver = {
822 		/* auxiliary_driver_register() sets .name to be the modname */
823 	},
824 	.id_table = intel_dg_mtd_id_table
825 };
826 module_auxiliary_driver(intel_dg_mtd_driver);
827 
828 MODULE_LICENSE("GPL");
829 MODULE_AUTHOR("Intel Corporation");
830 MODULE_DESCRIPTION("Intel DGFX MTD driver");
831