/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 * Derived from iLib's single-stepping code.
 */

#ifndef __tilegx__   /* Hardware support for single step unavailable. */

/* These functions are only used on the TILE platform */
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <arch/abi.h>
#include <arch/opcode.h>

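/*
 * Helpers for picking apart TILEPro bundles: sign-extend a 17-bit
 * branch offset, and mask off the X1 instruction slot of a bundle
 * (the bits rewritten by the create_*_X1() helpers below).
 */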
#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)

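/* Nonzero to pr_info() every unaligned access the kernel fixes up. */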
int unaligned_printk;

static int __init setup_unaligned_printk(char *str)
{
	long val;
	if (strict_strtol(str, 0, &val) != 0)
		return 0;
	unaligned_printk = val;
	pr_info("Printk for each unaligned data access is %s\n",
		unaligned_printk ? "enabled" : "disabled");
	return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);

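/*
 * Number of unaligned accesses fixed up so far; reported to userspace
 * via /proc/sys/tile/unaligned_fixup/count (see the message below).
 */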
unsigned int unaligned_fixup_count;

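/* Classification of the memory operation (if any) in the stepped bundle. */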
enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};

static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
{
	tile_bundle_bits result;

	/* mask out the old offset */
	tile_bundle_bits mask = create_BrOff_X1(-1);
	result = n & (~mask);

	/* or in the new offset */
	result |= create_BrOff_X1(offset);

	return result;
}

static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
	tile_bundle_bits result;
	tile_bundle_bits op;

	result = n & (~TILE_X1_MASK);

	op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src) ;

	result |= op;
	return result;
}

static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}

static inline tile_bundle_bits addi_X1(
	tile_bundle_bits n, int dest, int src, int imm)
{
	n &= ~TILE_X1_MASK;

	n |=  (create_SrcA_X1(src) |
	       create_Dest_X1(dest) |
	       create_Imm8_X1(imm) |
	       create_S_X1(0) |
	       create_Opcode_X1(IMM_0_OPCODE_X1) |
	       create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

	return n;
}

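/*
 * Fix up an unaligned load or store: perform the access on the user's
 * behalf with copy_{from,to}_user(), then rewrite the bundle so the
 * single-stepped copy no longer touches memory (a nop, a prefetch, or
 * just the post-increment addi).  Returns 0 if a signal was sent instead.
 */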
static tile_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tile_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op,
	int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;

	/* Get address and value registers */
	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg  = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg  = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

#ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here
#endif
	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		err = copy_to_user(addr, &val, size);
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = addr
		};
		trace_unhandled_signal("segfault", regs,
				       (unsigned long)addr, SIGSEGV);
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_fixup == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("unaligned trap", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of"
			" unaligned %s at %#lx.\n",
			current->pid, current->comm, regs->pc,
			(mem_op == MEMOP_LOAD ||
			 mem_op == MEMOP_LOAD_POSTINCR) ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}

/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();
	kfree(ti->step_state);
	ti->step_state = NULL;
}

/**
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 *  When we arrive at this routine via a trampoline, the single step
 *  engine copies the executing bundle to the single step buffer.
 *  If the instruction is a conditional branch, then the target is
 *  reset to one past the next instruction. If the instruction
 *  sets the lr, then that is noted. If the instruction is a jump
 *  or call, then the new target pc is preserved and the current
 *  bundle instruction set to null.
 *
 *  The necessary post-single-step rewriting information is stored in
 *  single_step_state.  We use data segment values because the
 *  stack will be rewound when we run the rewritten single-stepped
 *  instruction.
 */
void single_step_once(struct pt_regs *regs)
{
	extern tile_bundle_bits __single_step_ill_insn;
	extern tile_bundle_bits __single_step_j_insn;
	extern tile_bundle_bits __single_step_addli_insn;
	extern tile_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tile_bundle_bits __user *buffer, *pc;
	tile_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;  /* happy compiler */

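	/*
	 * Template bundles, kept in .rodata and copied (with fields
	 * patched in) into the per-thread execution buffer below: an
	 * "ill" to trap back into the kernel, addli/auli to apply a
	 * pending register update inline, and a jump back to the next
	 * user instruction.
	 */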
	asm(
"    .pushsection .rodata.single_step\n"
"    .align 8\n"
"    .globl    __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
"    ill\n"
"    .globl    __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
"    { nop; addli r0, zero, 0 }\n"
"    .globl    __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
"    { nop; auli r0, r0, 0 }\n"
"    .globl    __single_step_j_insn\n"
"__single_step_j_insn:\n"
"    j .\n"
"    .popsection\n"
	);

	/*
	 * Enable interrupts here to allow touching userspace and the like.
	 * The callers expect this: do_trap() already has interrupts
	 * enabled, and do_work_pending() handles functions that enable
	 * interrupts internally.
	 */
	local_irq_enable();

	if (state == NULL) {
		/* allocate the per-thread single-step state */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		down_write(&current->mm->mmap_sem);
		buffer = (void __user *) do_mmap(NULL, 0, 64,
					  PROT_EXEC | PROT_READ | PROT_WRITE,
					  MAP_PRIVATE | MAP_ANONYMOUS,
					  0);
		up_write(&current->mm->mmap_sem);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;

		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tile_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction. The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken. The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

#if CHIP_HAS_WH64()
		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
#endif /* CHIP_HAS_WH64() */
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we need to SIGSEGV.
	 */
	if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
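			/*
			 * The addli above supplied the sign-extended low 16
			 * bits; bias by 0x8000 before taking the high half so
			 * the auli below compensates for that sign extension.
			 */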
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}

#else
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>

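/*
 * PC at which single-stepping was requested, saved per-cpu so that
 * gx_singlestep_handle() can tell whether the stepped instruction
 * actually retired (the PC has moved) or the step was canceled.
 */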
static DEFINE_PER_CPU(unsigned long, ss_saved_pc);


/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */

void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

		ptrace_notify(SIGTRAP);
		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	}
}


/*
 * Called from need_singlestep.  Set up the control registers and the enable
 * register, then return back.
 */

void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
	/* Nothing */
}

#endif /* !__tilegx__ */