1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2012 Google, Inc.
4 */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/device.h>
9 #include <linux/err.h>
10 #include <linux/errno.h>
11 #include <linux/init.h>
12 #include <linux/io.h>
13 #include <linux/kernel.h>
14 #include <linux/list.h>
15 #include <linux/rslib.h>
16 #include <linux/slab.h>
17 #include <linux/uaccess.h>
18 #include <linux/vmalloc.h>
19 #include <linux/mm.h>
20 #include <asm/page.h>
21
22 #include "ram_internal.h"
23
/**
 * struct persistent_ram_buffer - persistent circular RAM buffer
 *
 * This header lives at the very start of the reserved RAM region and is
 * the on-RAM layout shared across reboots — do not reorder or resize.
 *
 * @sig: Signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
 * @start: First valid byte in the buffer.
 * @size: Number of valid bytes in the buffer.
 * @data: The contents of the buffer.
 */
struct persistent_ram_buffer {
	uint32_t sig;
	atomic_t start;
	atomic_t size;
	uint8_t data[];
};

/* ASCII "DBGC" packed little-endian; each zone xors in its own type value. */
#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */
/* Number of valid bytes currently held in the zone's circular buffer. */
static inline size_t buffer_size(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->size);
}
45
/* Offset of the first valid byte (read position) in the circular buffer. */
static inline size_t buffer_start(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->start);
}
50
51 /* increase and wrap the start pointer, returning the old value */
buffer_start_add(struct persistent_ram_zone * prz,size_t a)52 static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
53 {
54 int old;
55 int new;
56 unsigned long flags = 0;
57
58 if (!(prz->flags & PRZ_FLAG_NO_LOCK))
59 raw_spin_lock_irqsave(&prz->buffer_lock, flags);
60
61 old = atomic_read(&prz->buffer->start);
62 new = old + a;
63 while (unlikely(new >= prz->buffer_size))
64 new -= prz->buffer_size;
65 atomic_set(&prz->buffer->start, new);
66
67 if (!(prz->flags & PRZ_FLAG_NO_LOCK))
68 raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
69
70 return old;
71 }
72
73 /* increase the size counter until it hits the max size */
buffer_size_add(struct persistent_ram_zone * prz,size_t a)74 static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
75 {
76 size_t old;
77 size_t new;
78 unsigned long flags = 0;
79
80 if (!(prz->flags & PRZ_FLAG_NO_LOCK))
81 raw_spin_lock_irqsave(&prz->buffer_lock, flags);
82
83 old = atomic_read(&prz->buffer->size);
84 if (old == prz->buffer_size)
85 goto exit;
86
87 new = old + a;
88 if (new > prz->buffer_size)
89 new = prz->buffer_size;
90 atomic_set(&prz->buffer->size, new);
91
92 exit:
93 if (!(prz->flags & PRZ_FLAG_NO_LOCK))
94 raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
95 }
96
/*
 * Compute RS parity over @data[0..len) and store the @ecc_size parity
 * symbols into @ecc.  The per-zone par[] workspace holds the wide symbols
 * produced by encode_rs8(); the loop narrows each one into the uint8_t
 * parity area (so a memcpy() would not be equivalent here).
 */
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
	uint8_t *data, size_t len, uint8_t *ecc)
{
	int i;

	/* Initialize the parity buffer */
	memset(prz->ecc_info.par, 0,
	       prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0]));
	encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0);
	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		ecc[i] = prz->ecc_info.par[i];
}
109
/*
 * Verify/correct @data[0..len) against the stored parity in @ecc.
 * Widens the byte parity into the par[] workspace, then decodes in place.
 * Returns the number of corrected symbols, or a negative value when the
 * block is uncorrectable (decode_rs8() semantics).
 */
static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
	void *data, size_t len, uint8_t *ecc)
{
	int i;

	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		prz->ecc_info.par[i] = ecc[i];
	return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len,
			  NULL, 0, NULL, 0, NULL);
}
120
/*
 * Recompute RS parity for every ECC block overlapping the data-area byte
 * range [start, start + count).  The final block of the buffer may be
 * shorter than block_size and is encoded with its actual length.
 *
 * NOTE(review): the "& ~(ecc_block_size - 1)" rounding assumes block_size
 * is a power of two — the default of 128 satisfies this; confirm for
 * caller-supplied ECC geometries.
 */
static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
	unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *buffer_end = buffer->data + prz->buffer_size;
	uint8_t *block;
	uint8_t *par;
	int ecc_block_size = prz->ecc_info.block_size;
	int ecc_size = prz->ecc_info.ecc_size;
	int size = ecc_block_size;

	/* Zones without ECC have no parity area to maintain. */
	if (!ecc_size)
		return;

	/* First ECC block containing 'start', and its parity record. */
	block = buffer->data + (start & ~(ecc_block_size - 1));
	par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

	do {
		/* Trailing partial block: encode only the bytes that exist. */
		if (block + ecc_block_size > buffer_end)
			size = buffer_end - block;
		persistent_ram_encode_rs8(prz, block, size, par);
		block += ecc_block_size;
		par += ecc_size;
	} while (block < buffer->data + start + count);
}
146
/*
 * Refresh the parity record covering the buffer header (sig/start/size),
 * which lives in its own slot at the end of the parity area (par_header).
 * No-op when ECC is disabled for this zone.
 */
static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	if (!prz->ecc_info.ecc_size)
		return;

	persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
				  prz->par_header);
}
157
/*
 * Walk every ECC block that holds valid old data and decode/correct it in
 * place before the contents are snapshotted.  Correction statistics are
 * accumulated in prz->corrected_bytes / prz->bad_blocks for later
 * reporting by persistent_ram_ecc_string().
 */
static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *block;
	uint8_t *par;

	if (!prz->ecc_info.ecc_size)
		return;

	block = buffer->data;
	par = prz->par_buffer;
	/* Only scan up to the number of valid bytes, not the whole area. */
	while (block < buffer->data + buffer_size(prz)) {
		int numerr;
		int size = prz->ecc_info.block_size;
		/* Clamp the trailing block to the end of the data area. */
		if (block + size > buffer->data + prz->buffer_size)
			size = buffer->data + prz->buffer_size - block;
		numerr = persistent_ram_decode_rs8(prz, block, size, par);
		if (numerr > 0) {
			pr_devel("error in block %p, %d\n", block, numerr);
			prz->corrected_bytes += numerr;
		} else if (numerr < 0) {
			pr_devel("uncorrectable error in block %p\n", block);
			prz->bad_blocks++;
		}
		block += prz->ecc_info.block_size;
		par += prz->ecc_info.ecc_size;
	}
}
186
/*
 * Set up Reed-Solomon ECC for the zone: carve the parity area out of the
 * tail of the data region, build the RS codec, allocate the decode
 * workspace, and verify (and transparently correct) the header parity.
 *
 * Returns 0 on success (including the ECC-disabled case), -EINVAL for an
 * impossible ECC geometry or codec-init failure, -ENOMEM if the parity
 * workspace cannot be allocated.  On failure, already-acquired resources
 * (rs_decoder) are released by persistent_ram_free() in the caller.
 */
static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
	struct persistent_ram_ecc_info *ecc_info)
{
	int numerr;
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t ecc_blocks;
	size_t ecc_total;

	/* ECC is opt-in; nothing to do when not requested. */
	if (!ecc_info || !ecc_info->ecc_size)
		return 0;

	/* Fall back to the historical defaults for any unset parameter. */
	prz->ecc_info.block_size = ecc_info->block_size ?: 128;
	prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
	prz->ecc_info.symsize = ecc_info->symsize ?: 8;
	prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

	/*
	 * One parity record per data block; the "+ 1" below reserves an
	 * extra record for the buffer header itself (par_header).
	 */
	ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
				  prz->ecc_info.block_size +
				  prz->ecc_info.ecc_size);
	ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
	if (ecc_total >= prz->buffer_size) {
		pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
		       __func__, prz->ecc_info.ecc_size,
		       ecc_total, prz->buffer_size);
		return -EINVAL;
	}

	/* Shrink the usable data area; parity lives directly after it. */
	prz->buffer_size -= ecc_total;
	prz->par_buffer = buffer->data + prz->buffer_size;
	prz->par_header = prz->par_buffer +
			  ecc_blocks * prz->ecc_info.ecc_size;

	/*
	 * first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
				  0, 1, prz->ecc_info.ecc_size);
	if (prz->rs_decoder == NULL) {
		pr_info("init_rs failed\n");
		return -EINVAL;
	}

	/* allocate workspace instead of using stack VLA */
	prz->ecc_info.par = kmalloc_objs(*prz->ecc_info.par,
					 prz->ecc_info.ecc_size);
	if (!prz->ecc_info.par) {
		pr_err("cannot allocate ECC parity workspace\n");
		return -ENOMEM;
	}

	prz->corrected_bytes = 0;
	prz->bad_blocks = 0;

	/* Validate/correct the header before any of it is trusted. */
	numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
					   prz->par_header);
	if (numerr > 0) {
		pr_info("error in header, %d\n", numerr);
		prz->corrected_bytes += numerr;
	} else if (numerr < 0) {
		pr_info_ratelimited("uncorrectable error in header\n");
		prz->bad_blocks++;
	}

	return 0;
}
253
persistent_ram_ecc_string(struct persistent_ram_zone * prz,char * str,size_t len)254 ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
255 char *str, size_t len)
256 {
257 ssize_t ret;
258
259 if (!prz->ecc_info.ecc_size)
260 return 0;
261
262 if (prz->corrected_bytes || prz->bad_blocks)
263 ret = snprintf(str, len, ""
264 "\nECC: %d Corrected bytes, %d unrecoverable blocks\n",
265 prz->corrected_bytes, prz->bad_blocks);
266 else
267 ret = snprintf(str, len, "\nECC: No errors detected\n");
268
269 return ret;
270 }
271
/*
 * Copy @count bytes from kernel memory @s into the data area at offset
 * @start and refresh the parity covering that range.  No wrap handling
 * here — callers split writes that cross the end of the buffer.
 * memcpy_toio() is used because the zone may be an ioremap()ed region
 * (see persistent_ram_iomap()).
 */
static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
	const void *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	memcpy_toio(buffer->data + start, s, count);
	persistent_ram_update_ecc(prz, start, count);
}
279
persistent_ram_update_user(struct persistent_ram_zone * prz,const void __user * s,unsigned int start,unsigned int count)280 static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
281 const void __user *s, unsigned int start, unsigned int count)
282 {
283 struct persistent_ram_buffer *buffer = prz->buffer;
284 int ret = unlikely(copy_from_user(buffer->data + start, s, count)) ?
285 -EFAULT : 0;
286 persistent_ram_update_ecc(prz, start, count);
287 return ret;
288 }
289
/*
 * Snapshot the zone's current contents into prz->old_log as one linear
 * buffer: the ring is "unrotated" by copying [start, size) first and then
 * [0, start).  ECC correction runs over the ring before the first
 * allocation so the snapshot holds corrected data.
 */
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t size = buffer_size(prz);
	size_t start = buffer_start(prz);

	/* Nothing recorded — leave any previous snapshot alone. */
	if (!size)
		return;

	/*
	 * If the existing buffer is differently sized, free it so a new
	 * one is allocated. This can happen when persistent_ram_save_old()
	 * is called early in boot and later for a timer-triggered
	 * survivable crash when the crash dumps don't match in size
	 * (which would be extremely unlikely given kmsg buffers usually
	 * exceed prz buffer sizes).
	 */
	if (prz->old_log && prz->old_log_size != size)
		persistent_ram_free_old(prz);

	if (!prz->old_log) {
		persistent_ram_ecc_old(prz);
		prz->old_log = kvzalloc(size, GFP_KERNEL);
	}
	if (!prz->old_log) {
		pr_err("failed to allocate buffer\n");
		return;
	}

	prz->old_log_size = size;
	/* Tail half of the ring first, then the wrapped head portion. */
	memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
	memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}
323
/*
 * Append @count bytes to the circular buffer, overwriting the oldest data
 * when full.  Oversized records are truncated to their tail (the newest
 * bytes are the interesting ones for crash logs).  Writes that cross the
 * end of the data area are split into two persistent_ram_update() calls.
 * Always returns @count.
 */
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	/* Keep only the last buffer_size bytes of an oversized record. */
	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		/* Wraps: write up to the end, then restart at offset 0. */
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	/* start/size changed above, so the header parity must follow. */
	persistent_ram_update_header_ecc(prz);

	return count;
}
353
/*
 * Userspace-sourced variant of persistent_ram_write(): same truncation
 * and wrap-splitting, but copies via persistent_ram_update_user().
 * Returns @count on success or -EFAULT if any copy faulted; the header
 * parity is refreshed either way since start/size were already advanced.
 */
int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int count)
{
	int rem, ret = 0, c = count;
	size_t start;

	/* Keep only the last buffer_size bytes of an oversized record. */
	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		ret = persistent_ram_update_user(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	/* Skip the second copy if the first half already faulted. */
	if (likely(!ret))
		ret = persistent_ram_update_user(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return unlikely(ret) ? ret : count;
}
383
/* Size in bytes of the snapshot taken by persistent_ram_save_old(). */
size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
	return prz->old_log_size;
}
388
/*
 * Pointer to the saved previous-boot contents, or NULL if none were
 * captured.  Ownership stays with the zone (freed by
 * persistent_ram_free_old()).
 */
void *persistent_ram_old(struct persistent_ram_zone *prz)
{
	return prz->old_log;
}
393
/* Release the previous-boot snapshot and reset its bookkeeping. */
void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
	kvfree(prz->old_log);
	prz->old_log = NULL;
	prz->old_log_size = 0;
}
400
/*
 * Empty the circular buffer (start = size = 0) and re-seal the header
 * with fresh parity so the cleared state survives ECC verification on
 * the next boot.  The data area itself is not scrubbed.
 */
void persistent_ram_zap(struct persistent_ram_zone *prz)
{
	atomic_set(&prz->buffer->start, 0);
	atomic_set(&prz->buffer->size, 0);
	persistent_ram_update_header_ecc(prz);
}
407
/* Caching attribute requested for the mapping of the RAM zone. */
#define MEM_TYPE_WCOMBINE	0
#define MEM_TYPE_NONCACHED	1
#define MEM_TYPE_NORMAL		2

/*
 * Map a physical range that is part of system RAM (pfn_valid) into the
 * kernel's vmalloc space with the caching attributes selected by
 * @memtype.  Returns the virtual address corresponding to @start (i.e.
 * including the sub-page offset), or NULL on failure.
 */
static void *persistent_ram_vmap(phys_addr_t start, size_t size,
	unsigned int memtype)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	/* Round down to a page boundary and widen the size to match. */
	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	switch (memtype) {
	case MEM_TYPE_NORMAL:
		prot = PAGE_KERNEL;
		break;
	case MEM_TYPE_NONCACHED:
		prot = pgprot_noncached(PAGE_KERNEL);
		break;
	case MEM_TYPE_WCOMBINE:
		prot = pgprot_writecombine(PAGE_KERNEL);
		break;
	default:
		pr_err("invalid mem_type=%d\n", memtype);
		return NULL;
	}

	/* Temporary page array handed to vmap(); freed right after. */
	pages = kmalloc_objs(struct page *, page_count);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	/*
	 * VM_IOREMAP used here to bypass this region during vread()
	 * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
	 */
	vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
	kfree(pages);

	/*
	 * vmap() may fail and return NULL. Do not add the offset in this
	 * case, otherwise a NULL mapping would appear successful.
	 */
	if (!vaddr)
		return NULL;

	/*
	 * Since vmap() uses page granularity, we must add the offset
	 * into the page here, to get the byte granularity address
	 * into the mapping to represent the actual "start" location.
	 */
	return vaddr + offset_in_page(start);
}
472
/*
 * Map a physical range that is NOT system RAM via ioremap().  The region
 * is reserved first so it shows up under @label (or "ramoops") in
 * /proc/iomem.  Returns the mapped virtual address or NULL on failure,
 * in which case the region reservation has been released again.
 */
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
	unsigned int memtype, char *label)
{
	const char *name = label ?: "ramoops";
	void *va;

	if (!request_mem_region(start, size, name)) {
		pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
		       name,
		       (unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

	/* Write-combined only for MEM_TYPE_WCOMBINE (== 0). */
	va = memtype ? ioremap(start, size) : ioremap_wc(start, size);

	/* We must release the mem region if ioremap fails. */
	if (!va)
		release_mem_region(start, size);

	/*
	 * Since request_mem_region() and ioremap() are byte-granularity
	 * there is no need handle anything special like we do when the
	 * vmap() case in persistent_ram_vmap() above.
	 */
	return va;
}
501
/*
 * Map the reserved region and point prz->buffer at its header.  System
 * RAM (pfn_valid) goes through vmap(); anything else through ioremap().
 * On success, buffer_size is the data capacity after the header.
 * Returns 0 or -ENOMEM when no mapping could be established.
 */
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
	struct persistent_ram_zone *prz, int memtype)
{
	prz->paddr = start;
	prz->size = size;

	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size, memtype);
	else
		prz->vaddr = persistent_ram_iomap(start, size, memtype,
						  prz->label);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
		       (unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	/* The persistent_ram_buffer header sits at the region's start. */
	prz->buffer = prz->vaddr;
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}
525
/*
 * Finish bringing up a freshly mapped zone: initialize ECC, then decide
 * what to do with any pre-existing contents based on the signature and
 * the sanity of the stored start/size fields.  Valid old data is
 * snapshotted via persistent_ram_save_old(); missing, invalid, or
 * explicitly single-use (PRZ_FLAG_ZAP_OLD) contents are zapped.
 */
static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
	struct persistent_ram_ecc_info *ecc_info)
{
	int ret;
	bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);

	ret = persistent_ram_init_ecc(prz, ecc_info);
	if (ret) {
		pr_warn("ECC failed %s\n", prz->label);
		return ret;
	}

	/* Per-zone signature: caller's type value mixed into the magic. */
	sig ^= PERSISTENT_RAM_SIG;

	if (prz->buffer->sig == sig) {
		if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
			pr_debug("found existing empty buffer\n");
			return 0;
		}

		/* start/size beyond the data area means corruption. */
		if (buffer_size(prz) > prz->buffer_size ||
		    buffer_start(prz) > buffer_size(prz)) {
			pr_info("found existing invalid buffer, size %zu, start %zu\n",
				buffer_size(prz), buffer_start(prz));
			zap = true;
		} else {
			pr_debug("found existing buffer, size %zu, start %zu\n",
				 buffer_size(prz), buffer_start(prz));
			persistent_ram_save_old(prz);
		}
	} else {
		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
			 prz->buffer->sig);
		prz->buffer->sig = sig;
		zap = true;
	}

	/* Reset missing, invalid, or single-use memory area. */
	if (zap)
		persistent_ram_zap(prz);

	return 0;
}
569
/*
 * Tear down a zone: unmap it (vunmap or iounmap+release, mirroring how
 * persistent_ram_buffer_map() created it), free the RS codec, parity
 * workspace, old-log snapshot, and label, then free the zone itself and
 * NULL the caller's pointer.  Safe on partially constructed zones, which
 * is why persistent_ram_new()'s error path funnels through here.
 */
void persistent_ram_free(struct persistent_ram_zone **_prz)
{
	struct persistent_ram_zone *prz;

	if (!_prz)
		return;

	prz = *_prz;
	if (!prz)
		return;

	if (prz->vaddr) {
		if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
			/* We must vunmap() at page-granularity. */
			vunmap(prz->vaddr - offset_in_page(prz->paddr));
		} else {
			iounmap(prz->vaddr);
			release_mem_region(prz->paddr, prz->size);
		}
		prz->vaddr = NULL;
	}
	if (prz->rs_decoder) {
		free_rs(prz->rs_decoder);
		prz->rs_decoder = NULL;
	}
	kfree(prz->ecc_info.par);
	prz->ecc_info.par = NULL;

	persistent_ram_free_old(prz);
	kfree(prz->label);
	kfree(prz);
	*_prz = NULL;
}
603
/*
 * Allocate and fully initialize a persistent RAM zone over the physical
 * range [start, start + size): map it, set up ECC, and recover or reset
 * any previous contents.  Returns the new zone or an ERR_PTR(); all
 * partial state is cleaned up via persistent_ram_free() on failure.
 */
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
	u32 sig, struct persistent_ram_ecc_info *ecc_info,
	unsigned int memtype, u32 flags, char *label)
{
	struct persistent_ram_zone *prz;
	int ret = -ENOMEM;

	prz = kzalloc_obj(struct persistent_ram_zone);
	if (!prz) {
		pr_err("failed to allocate persistent ram zone\n");
		goto err;
	}

	/* Initialize general buffer state. */
	raw_spin_lock_init(&prz->buffer_lock);
	prz->flags = flags;
	/* Own copy of the label; freed in persistent_ram_free(). */
	prz->label = kstrdup(label, GFP_KERNEL);
	if (!prz->label)
		goto err;

	ret = persistent_ram_buffer_map(start, size, prz, memtype);
	if (ret)
		goto err;

	ret = persistent_ram_post_init(prz, sig, ecc_info);
	if (ret)
		goto err;

	pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
		 prz->label, prz->size, (unsigned long long)prz->paddr,
		 sizeof(*prz->buffer), prz->buffer_size,
		 prz->size - sizeof(*prz->buffer) - prz->buffer_size,
		 prz->ecc_info.ecc_size, prz->ecc_info.block_size);

	return prz;
err:
	persistent_ram_free(&prz);
	return ERR_PTR(ret);
}
643