1 // SPDX-License-Identifier: GPL-2.0
2
3 #ifndef pr_fmt
4 #define pr_fmt(fmt) "stackprot: " fmt
5 #endif
6
7 #include <linux/export.h>
8 #include <linux/hex.h>
9 #include <linux/uaccess.h>
10 #include <linux/printk.h>
11 #include <asm/abs_lowcore.h>
12 #include <asm/sections.h>
13 #include <asm/machine.h>
14 #include <asm/asm-offsets.h>
15 #include <asm/arch-stackprotector.h>
16
/*
 * This file is compiled twice: once into the decompressor and once into
 * the kernel proper. Map the print/panic helpers to whichever
 * implementation the current build environment provides.
 */
#ifdef __DECOMPRESSOR

#define DEBUGP boot_debug
#define EMERGP boot_emerg
#define PANIC boot_panic

#else /* __DECOMPRESSOR */

#define DEBUGP pr_debug
#define EMERGP pr_emerg
#define PANIC panic

#endif /* __DECOMPRESSOR */
30
/*
 * When non-zero, every patched instruction is dumped via
 * stack_protector_dump(). Marked __bootdata_preserved so the value set in
 * the decompressor survives into the kernel proper.
 */
int __bootdata_preserved(stack_protector_debug);

/*
 * Canary symbol referenced by compiler-generated stack protector code
 * (the references to it are what gets patched below). Exported since
 * modules may contain such compiler-generated references as well.
 */
unsigned long __stack_chk_guard;
EXPORT_SYMBOL(__stack_chk_guard);
35
/*
 * Layout of a six byte RIL-format instruction: two opcode parts with the
 * R1 register field in between, followed by a 32-bit immediate field.
 */
struct insn_ril {
	u8 opc1 : 8;	/* first opcode byte */
	u8 r1 : 4;	/* R1 register operand */
	u8 opc2 : 4;	/* opcode extension */
	u32 imm;	/* immediate / relative-offset field */
} __packed;
42
43 /*
44 * Convert a virtual instruction address to a real instruction address. The
45 * decompressor needs to patch instructions within the kernel image based on
46 * their virtual addresses, while dynamic address translation is still
47 * disabled. Therefore a translation from virtual kernel image addresses to
48 * the corresponding physical addresses is required.
49 *
50 * After dynamic address translation is enabled and when the kernel needs to
51 * patch instructions such a translation is not required since the addresses
52 * are identical.
53 */
vaddress_to_insn(unsigned long vaddress)54 static struct insn_ril *vaddress_to_insn(unsigned long vaddress)
55 {
56 #ifdef __DECOMPRESSOR
57 return (struct insn_ril *)__kernel_pa(vaddress);
58 #else
59 return (struct insn_ril *)vaddress;
60 #endif
61 }
62
/*
 * Inverse of vaddress_to_insn(): map an instruction pointer back to the
 * virtual kernel image address it was patched at.
 */
static unsigned long insn_to_vaddress(struct insn_ril *insn)
{
	unsigned long vaddress;

#ifdef __DECOMPRESSOR
	vaddress = (unsigned long)__kernel_va(insn);
#else
	vaddress = (unsigned long)insn;
#endif
	return vaddress;
}
71
72 #define INSN_RIL_STRING_SIZE (sizeof(struct insn_ril) * 2 + 1)
73
insn_ril_to_string(char * str,struct insn_ril * insn)74 static void insn_ril_to_string(char *str, struct insn_ril *insn)
75 {
76 u8 *ptr = (u8 *)insn;
77 int i;
78
79 for (i = 0; i < sizeof(*insn); i++)
80 hex_byte_pack(&str[2 * i], ptr[i]);
81 str[2 * i] = 0;
82 }
83
stack_protector_dump(struct insn_ril * old,struct insn_ril * new)84 static void stack_protector_dump(struct insn_ril *old, struct insn_ril *new)
85 {
86 char ostr[INSN_RIL_STRING_SIZE];
87 char nstr[INSN_RIL_STRING_SIZE];
88
89 insn_ril_to_string(ostr, old);
90 insn_ril_to_string(nstr, new);
91 DEBUGP("%016lx: %s -> %s\n", insn_to_vaddress(old), ostr, nstr);
92 }
93
/*
 * Verify that the instruction at a __stack_chk_guard reference site is one
 * of the two patterns the compiler is expected to emit, before it gets
 * patched:
 *
 *   larl (opcode 0xc0/0x0): load address relative long
 *   lgrl (opcode 0xc4/0x8): load relative long
 *
 * Returns 0 if the instruction is patchable. On an unexpected instruction
 * the decompressor panics (the image cannot be patched consistently),
 * while the kernel proper just reports it and returns -EINVAL.
 */
static int stack_protector_verify(struct insn_ril *insn, unsigned long kernel_start)
{
	char istr[INSN_RIL_STRING_SIZE];
	unsigned long vaddress, offset;

	/* larl */
	if (insn->opc1 == 0xc0 && insn->opc2 == 0x0)
		return 0;
	/* lgrl */
	if (insn->opc1 == 0xc4 && insn->opc2 == 0x8)
		return 0;
	insn_ril_to_string(istr, insn);
	vaddress = insn_to_vaddress(insn);
	if (__is_defined(__DECOMPRESSOR)) {
		/*
		 * Also print the offset of the instruction within the
		 * (unrelocated) kernel image to ease correlating the
		 * failure with a vmlinux disassembly.
		 */
		offset = (unsigned long)insn - kernel_start + TEXT_OFFSET;
		EMERGP("Unexpected instruction at %016lx/%016lx: %s\n", vaddress, offset, istr);
		PANIC("Stackprotector error\n");
	} else {
		EMERGP("Unexpected instruction at %016lx: %s\n", vaddress, istr);
	}
	return -EINVAL;
}
116
/*
 * Patch all __stack_chk_guard reference sites listed in the table
 * [start, end). Each table entry holds the virtual address of a LARL or
 * LGRL instruction referencing __stack_chk_guard; each such instruction
 * is verified and then rewritten in place via s390_kernel_write().
 *
 * Returns 0 on success, or the error from stack_protector_verify() if an
 * unexpected instruction is found (kernel proper only; the decompressor
 * panics instead).
 */
int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start)
{
	unsigned long canary, *loc;
	struct insn_ril *insn, new;
	int rc;

	/*
	 * Convert LARL/LGRL instructions to LLILF so register R1 contains the
	 * address of the per-cpu / per-process stack canary:
	 *
	 * LARL/LGRL R1,__stack_chk_guard => LLILF R1,__lc_stack_canary
	 */
	canary = __LC_STACK_CANARY;
	if (machine_has_relocated_lowcore())
		canary += LOWCORE_ALT_ADDRESS;
	for (loc = start; loc < end; loc++) {
		insn = vaddress_to_insn(*loc);
		rc = stack_protector_verify(insn, kernel_start);
		if (rc)
			return rc;
		/* Copy first to keep the original R1 register field. */
		new = *insn;
		/* llilf: load logical immediate into the low 32 bits of R1 */
		new.opc1 = 0xc0;
		new.opc2 = 0xf;
		/* Truncation to 32 bits is fine: the canary address fits. */
		new.imm = canary;
		if (stack_protector_debug)
			stack_protector_dump(insn, &new);
		s390_kernel_write(insn, &new, sizeof(*insn));
	}
	return 0;
}
147
148 #ifdef __DECOMPRESSOR
__stack_protector_apply_early(unsigned long kernel_start)149 void __stack_protector_apply_early(unsigned long kernel_start)
150 {
151 unsigned long *start, *end;
152
153 start = (unsigned long *)vmlinux.stack_prot_start;
154 end = (unsigned long *)vmlinux.stack_prot_end;
155 __stack_protector_apply(start, end, kernel_start);
156 }
157 #endif
158