/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * based on machine_kexec.c from other architectures in linux-2.6.18
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/highmem.h>
#include <linux/mmu_context.h>
#include <linux/io.h>
#include <linux/timex.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <hv/hypervisor.h>


/*
 * These definitions are not in elf.h or any other kernel include.
 * They are needed by the little boot notes parser below to extract
 * the command line so we can pass it to the hypervisor.
 */
struct Elf32_Bhdr {
	Elf32_Word b_signature;
	Elf32_Word b_size;
	Elf32_Half b_checksum;
	Elf32_Half b_records;
};
#define ELF_BOOT_MAGIC		0x0E1FB007
#define EBN_COMMAND_LINE	0x00000004
#define roundupsz(X) (((X) + 3) & ~3)
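/*
 * Rough sketch of the layout kexec_bn2cl() expects to find in a boot
 * notes page (illustrative only, not an authoritative description of
 * the ELF boot notes format):
 *
 *   struct Elf32_Bhdr       block header: magic, total size in bytes,
 *                           and a checksum over the whole block
 *   Elf32_Nhdr + payload    note record 0, payload padded to a 4-byte
 *                           boundary via roundupsz()
 *   Elf32_Nhdr + payload    note record 1
 *   ...
 *
 * The record whose n_type is EBN_COMMAND_LINE carries the
 * NUL-terminated command line as its payload.
 */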

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */


void machine_shutdown(void)
{
	/*
	 * Normally we would stop all the other processors here, but
	 * the check in machine_kexec_prepare below ensures we'll only
	 * get this far if we've been booted with "nosmp" on the
	 * command line or without CONFIG_SMP so there's nothing to do
	 * here (for now).
	 */
}

void machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * Cannot happen.  This type of kexec is disabled on this
	 * architecture (and enforced in machine_kexec_prepare below).
	 */
}


int machine_kexec_prepare(struct kimage *image)
{
	if (num_online_cpus() > 1) {
		pr_warning("%s: detected attempt to kexec with num_online_cpus() > 1\n",
			   __func__);
		return -ENOSYS;
	}
	if (image->type != KEXEC_TYPE_DEFAULT) {
		pr_warning("%s: detected attempt to kexec with unsupported type: %d\n",
			   __func__, image->type);
		return -ENOSYS;
	}
	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	/*
	 * We did nothing in machine_kexec_prepare,
	 * so we have nothing to do here.
	 */
}

/*
 * If we can find ELF boot notes on this page, return the command
 * line.  Otherwise, silently return NULL.  Somewhat kludgy, but there
 * is no good way to do this without significantly rearchitecting the
 * architecture-independent kexec code.
 */

static unsigned char *kexec_bn2cl(void *pg)
{
	struct Elf32_Bhdr *bhdrp;
	Elf32_Nhdr *nhdrp;
	unsigned char *desc;
	unsigned char *command_line;
	__sum16 csum;

	bhdrp = (struct Elf32_Bhdr *) pg;

	/*
	 * This routine is invoked for every source page, so make
	 * sure to quietly ignore every impossible page.
	 */
	if (bhdrp->b_signature != ELF_BOOT_MAGIC ||
	    bhdrp->b_size > PAGE_SIZE)
		return 0;

	/*
	 * If we get a checksum mismatch, warn with the checksum
	 * so we can diagnose better.
	 */
	csum = ip_compute_csum(pg, bhdrp->b_size);
	if (csum != 0) {
		pr_warning("%s: bad checksum %#x (size %d)\n",
			   __func__, csum, bhdrp->b_size);
		return 0;
	}

	nhdrp = (Elf32_Nhdr *) (bhdrp + 1);

	while (nhdrp->n_type != EBN_COMMAND_LINE) {

		desc = (unsigned char *) (nhdrp + 1);
		desc += roundupsz(nhdrp->n_descsz);

		nhdrp = (Elf32_Nhdr *) desc;

		/* still in bounds? */
		if ((unsigned char *) (nhdrp + 1) >
		    ((unsigned char *) pg) + bhdrp->b_size) {

			pr_info("%s: out of bounds\n", __func__);
			return 0;
		}
	}

	command_line = (unsigned char *) (nhdrp + 1);
	desc = command_line;

	while (*desc != '\0') {
		desc++;
		if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
			pr_info("%s: ran off end of page\n",
			       __func__);
			return 0;
		}
	}

	return command_line;
}

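/*
 * Walk the kimage entry list looking for a source page that contains
 * boot notes.  The list (rooted at image->head) uses the standard
 * kexec encoding: each entry is a physical address with low-bit flags,
 * where IND_SOURCE marks a page of data for the new kernel,
 * IND_INDIRECTION points at the next page of entries, and IND_DONE
 * terminates the list.  Each source page is mapped just long enough to
 * probe it with kexec_bn2cl(); if a command line is found, that page
 * stays mapped until it has been handed to the hypervisor below.
 */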
static void kexec_find_and_set_command_line(struct kimage *image)
{
	kimage_entry_t *ptr, entry;

	unsigned char *command_line = 0;
	unsigned char *r;
	HV_Errno hverr;

	for (ptr = &image->head;
	     (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
		     phys_to_virt((entry & PAGE_MASK)) : ptr + 1) {

		if ((entry & IND_SOURCE)) {
			void *va =
				kmap_atomic_pfn(entry >> PAGE_SHIFT);
			r = kexec_bn2cl(va);
			if (r) {
				command_line = r;
				break;
			}
			kunmap_atomic(va);
		}
	}

	if (command_line != 0) {
		pr_info("setting new command line to \"%s\"\n",
		       command_line);

		hverr = hv_set_command_line(
			(HV_VirtAddr) command_line, strlen(command_line));
		kunmap_atomic(command_line);
	} else {
		pr_info("%s: no command line found; making empty\n",
		       __func__);
		hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
	}
	if (hverr)
		pr_warning("%s: hv_set_command_line returned error: %d\n",
			   __func__, hverr);
}

/*
 * The kexec code range-checks all its PAs, so to avoid having it run
 * amok and allocate memory and then sequester it from every other
 * controller, we force it to come from controller zero.  We also
 * disable the oom-killer since if we do end up running out of memory,
 * that almost certainly won't help.
 */
struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
{
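	/*
	 * __GFP_THISNODE keeps the allocation on node (controller) zero
	 * with no fallback to other nodes, and __GFP_NORETRY lets the
	 * allocation fail quickly instead of retrying until the OOM
	 * killer gets involved.
	 */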
	gfp_mask |= __GFP_THISNODE | __GFP_NORETRY;
	return alloc_pages_node(0, gfp_mask, order);
}

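/*
 * Install a quasi "VA is PA" view of memory: identity-map everything
 * below PAGE_OFFSET with huge, kernel, non-L3-cached PTEs written
 * directly into the top level of the current page table.  The
 * relocate_new_kernel helper launched from machine_kexec() below runs
 * with this mapping in place, so it can dereference the physical
 * addresses in the kimage list as if they were virtual addresses.
 */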
static void setup_quasi_va_is_pa(void)
{
	HV_PTE *pgtable;
	HV_PTE pte;
	int i;

	/*
	 * Flush our TLB to prevent conflicts between the previous contents
	 * and the new stuff we're about to add.
	 */
	local_flush_tlb_all();

	/* setup VA is PA, at least up to PAGE_OFFSET */

	pgtable = (HV_PTE *)current->mm->pgd;
	pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);

	for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
		unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
		if (pfn_valid(pfn))
			__set_pte(&pgtable[i], pfn_pte(pfn, pte));
	}
}


void machine_kexec(struct kimage *image)
{
	void *reboot_code_buffer;
	void (*rnk)(unsigned long, void *, unsigned long)
		__noreturn;

	/* Mask all interrupts before starting to reboot. */
	interrupt_mask_set_mask(~0ULL);

	kexec_find_and_set_command_line(image);

	/*
	 * Adjust the home caching of the control page to be cached on
	 * this cpu, and copy the assembly helper into the control
	 * code page, which we map in the vmalloc area.
	 */
	homecache_change_page_home(image->control_code_page, 0,
				   smp_processor_id());
	reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
				  __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
	memcpy(reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);
	__flush_icache_range(
		(unsigned long) reboot_code_buffer,
		(unsigned long) reboot_code_buffer + relocate_new_kernel_size);

	setup_quasi_va_is_pa();

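	/*
	 * The relocation helper is handed the head of the kimage entry
	 * list, the address it was copied to, and the entry point of
	 * the new image.  It copies the new kernel into place and then
	 * jumps to it, so this call never returns (hence __noreturn on
	 * the function pointer above).
	 */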
	/* now call it */
	rnk = reboot_code_buffer;
	(*rnk)(image->head, reboot_code_buffer, image->start);
}