// SPDX-License-Identifier: GPL-2.0
/*
 * NVM helpers
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_1M
#define NVM_DATA_DWORDS		16

/* Intel specific NVM offsets */
#define INTEL_NVM_DEVID		0x05
#define INTEL_NVM_VERSION	0x08
#define INTEL_NVM_CSS		0x10
#define INTEL_NVM_FLASH_SIZE	0x45

/* ASMedia specific NVM offsets */
#define ASMEDIA_NVM_DATE	0x1c
#define ASMEDIA_NVM_VERSION	0x28

static DEFINE_IDA(nvm_ida);

/**
 * struct tb_nvm_vendor_ops - Vendor specific NVM operations
 * @read_version: Reads out NVM version from the flash
 * @validate: Validates the NVM image before update (optional)
 * @write_headers: Writes headers before the rest of the image (optional)
 */
struct tb_nvm_vendor_ops {
	int (*read_version)(struct tb_nvm *nvm);
	int (*validate)(struct tb_nvm *nvm);
	int (*write_headers)(struct tb_nvm *nvm);
};

/**
 * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
 * @vendor: Vendor ID
 * @vops: Vendor specific NVM operations
 *
 * Maps vendor ID to NVM vendor operations. If there is no mapping then
 * NVM firmware upgrade is disabled for the device.
 */
struct tb_nvm_vendor {
	u16 vendor;
	const struct tb_nvm_vendor_ops *vops;
};

static int intel_switch_nvm_version(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	u32 val, nvm_size, hdr_size;
	int ret;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (sw->safe_mode)
		return 0;

	ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
	if (ret)
		return ret;

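	/*
	 * The low three bits of INTEL_NVM_FLASH_SIZE encode the total
	 * flash size as a power-of-two multiple of 128k. The flash
	 * holds a header (8k before gen 3, 16k from gen 3 onwards)
	 * followed by two equally sized NVM regions, so the size of
	 * one (active) region is (flash size - header size) / 2.
	 */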
	hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - hdr_size) / 2;

	ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->major = (val >> 16) & 0xff;
	nvm->minor = (val >> 8) & 0xff;
	nvm->active_size = nvm_size;

	return 0;
}

static int intel_switch_nvm_validate(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	unsigned int image_size, hdr_size;
	u16 ds_size, device_id;
	u8 *buf = nvm->buf;

	image_size = nvm->buf_data_size;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (sw->safe_mode)
		return 0;

	/*
	 * Make sure the device ID in the image matches the one
	 * we read from the switch config space.
	 */
	device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
	if (device_id != sw->config.device_id)
		return -EINVAL;

	/* Skip headers in the image */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;

	return 0;
}

static int intel_switch_nvm_write_headers(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);

	if (sw->generation < 3) {
		int ret;

		/* Write CSS headers first */
		ret = dma_port_flash_write(sw->dma_port,
			DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS,
			DMA_PORT_CSS_MAX_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
	.read_version = intel_switch_nvm_version,
	.validate = intel_switch_nvm_validate,
	.write_headers = intel_switch_nvm_write_headers,
};

static int asmedia_switch_nvm_version(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	u32 val;
	int ret;

	ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;

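	/*
	 * The version is stored as three bytes in the opposite byte
	 * order from what we expose, so swap the lowest and highest
	 * bytes of the 24-bit value.
	 */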
	nvm->major = (val << 16) & 0xff0000;
	nvm->major |= val & 0x00ff00;
	nvm->major |= (val >> 16) & 0x0000ff;

	ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->minor = (val << 16) & 0xff0000;
	nvm->minor |= val & 0x00ff00;
	nvm->minor |= (val >> 16) & 0x0000ff;

	/* ASMedia NVM size is fixed to 512k */
	nvm->active_size = SZ_512K;

	return 0;
}

static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = {
	.read_version = asmedia_switch_nvm_version,
};

/* Router vendor NVM support table */
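/*
 * 0x174c is the ASMedia PCI vendor ID. Intel routers report either
 * PCI_VENDOR_ID_INTEL (0x8086) or 0x8087 in their config space, hence
 * the two entries mapping to the same ops.
 */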
static const struct tb_nvm_vendor switch_nvm_vendors[] = {
	{ 0x174c, &asmedia_switch_nvm_ops },
	{ PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },
	{ 0x8087, &intel_switch_nvm_ops },
};

static int intel_retimer_nvm_version(struct tb_nvm *nvm)
{
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	u32 val, nvm_size;
	int ret;

	ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->major = (val >> 16) & 0xff;
	nvm->minor = (val >> 8) & 0xff;

	ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
	if (ret)
		return ret;

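	/* Same flash size decode as for routers, with a fixed 16k header region */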
	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - SZ_16K) / 2;
	nvm->active_size = nvm_size;

	return 0;
}

static int intel_retimer_nvm_validate(struct tb_nvm *nvm)
{
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	unsigned int image_size, hdr_size;
	u8 *buf = nvm->buf;
	u16 ds_size, device;

	image_size = nvm->buf_data_size;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;

	return 0;
}

static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = {
	.read_version = intel_retimer_nvm_version,
	.validate = intel_retimer_nvm_validate,
};

/* Retimer vendor NVM support table */
static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
	{ 0x8087, &intel_retimer_nvm_ops },
};

/**
 * tb_nvm_alloc() - Allocate new NVM structure
 * @dev: Device owning the NVM
 *
 * Allocates new NVM structure with unique @id and returns it.
 *
 * Return:
 * * Pointer to &struct tb_nvm - On success.
 * * ERR_PTR(%-EOPNOTSUPP) - If the NVM format of @dev is not known to
 *   the kernel.
 * * ERR_PTR() - In case of other failures.
 */
struct tb_nvm *tb_nvm_alloc(struct device *dev)
{
	const struct tb_nvm_vendor_ops *vops = NULL;
	struct tb_nvm *nvm;
	int ret, i;

	if (tb_is_switch(dev)) {
		const struct tb_switch *sw = tb_to_switch(dev);

		for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) {
			const struct tb_nvm_vendor *v = &switch_nvm_vendors[i];

			if (v->vendor == sw->config.vendor_id) {
				vops = v->vops;
				break;
			}
		}

		if (!vops) {
			tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
				  sw->config.vendor_id);
			return ERR_PTR(-EOPNOTSUPP);
		}
	} else if (tb_is_retimer(dev)) {
		const struct tb_retimer *rt = tb_to_retimer(dev);

		for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) {
			const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i];

			if (v->vendor == rt->vendor) {
				vops = v->vops;
				break;
			}
		}

		if (!vops) {
			dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
				rt->vendor);
			return ERR_PTR(-EOPNOTSUPP);
		}
	} else {
		return ERR_PTR(-EOPNOTSUPP);
	}

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return ERR_PTR(-ENOMEM);

	ret = ida_alloc(&nvm_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(nvm);
		return ERR_PTR(ret);
	}

	nvm->id = ret;
	nvm->dev = dev;
	nvm->vops = vops;

	return nvm;
}
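
/*
 * A typical caller pairs tb_nvm_alloc() with the helpers below roughly
 * like this (illustrative sketch only, error handling omitted; see
 * tb_switch_nvm_add() for the real sequence, where nvm_read() and
 * nvm_write() are the caller's NVMem callbacks):
 *
 *	nvm = tb_nvm_alloc(&sw->dev);
 *	ret = tb_nvm_read_version(nvm);
 *	ret = tb_nvm_add_active(nvm, nvm_read);
 *	ret = tb_nvm_add_non_active(nvm, nvm_write);
 *	sw->nvm = nvm;
 *	...
 *	tb_nvm_free(sw->nvm);
 */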

/**
 * tb_nvm_read_version() - Read and populate NVM version
 * @nvm: NVM structure
 *
 * Uses vendor specific means to read and fill out the existing
 * active NVM version.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_nvm_read_version(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;

	if (vops && vops->read_version)
		return vops->read_version(nvm);

	return -EOPNOTSUPP;
}

/**
 * tb_nvm_validate() - Validate new NVM image
 * @nvm: NVM structure
 *
 * Runs vendor specific validation over the new NVM image. As a
 * side effect, updates @nvm->buf_data_start and @nvm->buf_data_size
 * fields to match the actual data to be written to the NVM.
 *
 * Return: %0 on successful validation, negative errno otherwise.
 */
int tb_nvm_validate(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;
	unsigned int image_size;
	u8 *buf = nvm->buf;

	if (!buf)
		return -EINVAL;
	if (!vops)
		return -EOPNOTSUPP;

	/* Just do basic image size checks */
	image_size = nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * Set the default data start in the buffer. The validate method
	 * below can change this if needed.
	 */
	nvm->buf_data_start = buf;

	return vops->validate ? vops->validate(nvm) : 0;
}

/**
 * tb_nvm_write_headers() - Write headers before the rest of the image
 * @nvm: NVM structure
 *
 * If the vendor NVM format requires writing headers before the rest of
 * the image, this function does that. Can be called even if the device
 * does not need this.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_nvm_write_headers(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;

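	/* @vops is always set by tb_nvm_alloc() so no NULL check is needed */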
	return vops->write_headers ? vops->write_headers(nvm) : 0;
}

/**
 * tb_nvm_add_active() - Adds active NVMem device to NVM
 * @nvm: NVM structure
 * @reg_read: Pointer to the function to read the NVM (passed directly to
 *	      the NVMem device)
 *
 * Registers new active NVMem device for @nvm. The @reg_read is called
 * directly from NVMem so it must handle possible concurrent access if
 * needed. The first parameter passed to @reg_read is the @nvm structure.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
{
	struct nvmem_config config;
	struct nvmem_device *nvmem;

	memset(&config, 0, sizeof(config));

	config.name = "nvm_active";
	config.reg_read = reg_read;
	config.read_only = true;
	config.id = nvm->id;
	config.stride = 4;
	config.word_size = 4;
	config.size = nvm->active_size;
	config.dev = nvm->dev;
	config.owner = THIS_MODULE;
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->active = nvmem;
	return 0;
}
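
/*
 * A minimal @reg_read implementation could look something like the
 * sketch below (illustrative only, assuming a router owns the NVM;
 * the real callbacks live in the switch and retimer code):
 *
 *	static int nvm_read(void *priv, unsigned int offset, void *val,
 *			    size_t bytes)
 *	{
 *		struct tb_nvm *nvm = priv;
 *		struct tb_switch *sw = tb_to_switch(nvm->dev);
 *
 *		return tb_switch_nvm_read(sw, offset, val, bytes);
 *	}
 */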

/**
 * tb_nvm_write_buf() - Write data to @nvm buffer
 * @nvm: NVM structure
 * @offset: Offset where to write the data
 * @val: Data buffer to write
 * @bytes: Number of bytes to write
 *
 * Helper function to cache the new NVM image before it is actually
 * written to the flash. Copies @bytes from @val to @nvm->buf starting
 * from @offset.
 *
 * Return:
 * * %0 - On success.
 * * %-ENOMEM - If buffer allocation failed.
 * * Negative errno - Another error occurred.
 */
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
		     size_t bytes)
{
	if (!nvm->buf) {
		nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!nvm->buf)
			return -ENOMEM;
	}

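	/*
	 * This assumes userspace writes the image sequentially starting
	 * from offset 0: the size of the buffered image is taken from
	 * the end of the most recent write.
	 */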
	nvm->flushed = false;
	nvm->buf_data_size = offset + bytes;
	memcpy(nvm->buf + offset, val, bytes);
	return 0;
}

/**
 * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
 * @nvm: NVM structure
 * @reg_write: Pointer to the function to write the NVM (passed directly
 *	       to the NVMem device)
 *
 * Registers new non-active NVMem device for @nvm. The @reg_write is called
 * directly from NVMem so it must handle possible concurrent access if
 * needed. The first parameter passed to @reg_write is the @nvm structure.
 * The size of the NVMem device is set to %NVM_MAX_SIZE.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
{
	struct nvmem_config config;
	struct nvmem_device *nvmem;

	memset(&config, 0, sizeof(config));

	config.name = "nvm_non_active";
	config.reg_write = reg_write;
	config.root_only = true;
	config.id = nvm->id;
	config.stride = 4;
	config.word_size = 4;
	config.size = NVM_MAX_SIZE;
	config.dev = nvm->dev;
	config.owner = THIS_MODULE;
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->non_active = nvmem;
	return 0;
}

/**
 * tb_nvm_free() - Release NVM and its resources
 * @nvm: NVM structure to release
 *
 * Releases NVM and the NVMem devices if they were registered.
 */
void tb_nvm_free(struct tb_nvm *nvm)
{
	if (nvm) {
		nvmem_unregister(nvm->non_active);
		nvmem_unregister(nvm->active);
		vfree(nvm->buf);
		ida_free(&nvm_ida, nvm->id);
	}
	kfree(nvm);
}

/**
 * tb_nvm_read_data() - Read data from NVM
 * @address: Start address on the flash
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 * @retries: Number of retries if block read fails
 * @read_block: Function that reads block from the flash
 * @read_block_data: Data passed to @read_block
 *
 * This is a generic function that reads data from NVM or NVM like
 * device.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
		     unsigned int retries, read_block_fn read_block,
		     void *read_block_data)
{
	do {
		unsigned int dwaddress, dwords, offset;
		u8 data[NVM_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

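		/*
		 * The flash is dword addressed and read in chunks of at
		 * most NVM_DATA_DWORDS. An unaligned start address is
		 * handled by reading the dwords that contain it and
		 * skipping the first @offset bytes of the result.
		 */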
		offset = address & 3;
		nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4);

		dwaddress = address / 4;
		dwords = ALIGN(nbytes, 4) / 4;

		ret = read_block(read_block_data, dwaddress, data, dwords);
		if (ret) {
			if (ret != -ENODEV && retries--)
				continue;
			return ret;
		}

		nbytes -= offset;
		memcpy(buf, data + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
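
/*
 * An illustrative @read_block callback (a sketch; my_controller,
 * my_read_block and my_flash_read are hypothetical names, real callers
 * pass their own transport specific helpers):
 *
 *	static int my_read_block(void *data, unsigned int dwaddress,
 *				 void *buf, size_t dwords)
 *	{
 *		struct my_controller *ctl = data;
 *
 *		return my_flash_read(ctl, dwaddress, buf, dwords);
 *	}
 *
 *	ret = tb_nvm_read_data(address, buf, size, 2, my_read_block, ctl);
 */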

/**
 * tb_nvm_write_data() - Write data to NVM
 * @address: Start address on the flash
 * @buf: Buffer where the data is copied from
 * @size: Size of the buffer in bytes
 * @retries: Number of retries if the block write fails
 * @write_block: Function that writes block to the flash
 * @write_block_data: Data passed to @write_block
 *
 * This is a generic function that writes data to NVM or NVM like device.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
		      unsigned int retries, write_block_fn write_block,
		      void *write_block_data)
{
	do {
		unsigned int offset, dwaddress;
		u8 data[NVM_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

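		/*
		 * In practice @address is dword aligned (the NVMem
		 * devices above are registered with a stride and word
		 * size of 4) so @offset is zero and whole dwords are
		 * written on each iteration.
		 */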
		offset = address & 3;
		nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4);

		memcpy(data + offset, buf, nbytes);

		dwaddress = address / 4;
		ret = write_block(write_block_data, dwaddress, data, nbytes / 4);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

void tb_nvm_exit(void)
{
	ida_destroy(&nvm_ida);
}