Lines Matching refs:ret_stack
49 * The top of the ret_stack (when not empty) will always have a reference
60 * ret_stack: (the stack grows upward)
62 * ret_stack[SHADOW_STACK_OFFSET]
63 * | SHADOW_STACK_TASK_VARS(ret_stack)[15] |
65 * | SHADOW_STACK_TASK_VARS(ret_stack)[0] |
66 * ret_stack[SHADOW_STACK_MAX_OFFSET]
76 * +--------------------------------------------+ ( It is 4 words from the ret_stack)
90 * | | previous ret_stack)
92 * ret_stack[0]
163 #define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))
167 * ret_stack to store task specific state.
169 #define SHADOW_STACK_TASK_VARS(ret_stack) \
170 ((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))
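Taken together, the layout diagram and the two macros above describe a single per-task array of longs: frame words grow upward from ret_stack[0] toward task->curr_ret_stack, while the top FGRAPH_ARRAY_SIZE longs (between SHADOW_STACK_MAX_OFFSET and SHADOW_STACK_OFFSET) hold one saved value per fgraph_ops slot. A minimal sketch of reading one of those per-task values, using only names that appear in this listing (the helper name fgraph_read_task_var is hypothetical):

	/* Sketch, not kernel code: frames live in t->ret_stack[0 .. t->curr_ret_stack),
	 * the task vars sit in the FGRAPH_ARRAY_SIZE longs above them. */
	static unsigned long fgraph_read_task_var(struct task_struct *t, int idx)
	{
		unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);

		return gvals[idx];	/* 0 <= idx < FGRAPH_ARRAY_SIZE */
	}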
224 /* Get the offset to the fgraph frame from a ret_stack value */
230 /* Get the type of word from a ret_stack value */
236 /* Get the data_index for a DATA type ret_stack word */
242 /* Get the data_size for a DATA type ret_stack word */
248 /* Get the word from the ret_stack at @offset */
251 return t->ret_stack[offset];
254 /* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
257 return __get_offset(t->ret_stack[offset]);
260 /* For BITMAP type: get the bitmask from the @offset at ret_stack */
264 return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
267 /* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */
271 t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
275 /* For DATA type: get the data saved under the ret_stack word at @offset */
278 unsigned long val = t->ret_stack[offset];
283 return (void *)&t->ret_stack[offset];
286 /* Create the ret_stack word for a DATA type */
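The accessors above each decode one field out of a single long stored on the shadow stack. A hedged round-trip sketch for the BITMAP case, restricted to the shift and mask names visible here (the real word also carries a type field and, for DATA words, an index and size, which are omitted because their constants are not shown in this listing):

	/* Sketch: encode/decode a BITMAP word (type bits intentionally omitted). */
	static unsigned long demo_make_bitmap_word(unsigned long bitmap,
						   unsigned long frame_offset)
	{
		return (bitmap << FGRAPH_INDEX_SHIFT) | frame_offset;
	}

	static unsigned long demo_get_bitmap(struct task_struct *t, int offset)
	{
		return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
	}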
309 unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
317 unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
322 static void ret_stack_init_task_vars(unsigned long *ret_stack)
324 unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);
330 * fgraph_reserve_data - Reserve storage on the task's ret_stack
335 * task's ret_stack shadow stack, for a given fgraph_ops during
343 * ret_stack for the data, or if fgraph_reserve_data() was called
360 		data = &current->ret_stack[curr_ret_stack];
369 current->ret_stack[curr_ret_stack - 1] = val;
375 current->ret_stack[curr_ret_stack - 1] = val;
405 * reserved on the task's ret_stack. This function returns the
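fgraph_reserve_data() and fgraph_retrieve_data() are meant to be paired from an fgraph_ops user's entry and return callbacks. A hedged sketch of that pairing; the callback prototypes are assumptions (they have changed across kernel versions), while the two data calls and gops->idx follow the descriptions above:

	/* Sketch: stash a timestamp at entry, read it back at return. */
	static int my_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops)
	{
		u64 *ts = fgraph_reserve_data(gops->idx, sizeof(*ts));

		if (!ts)
			return 0;	/* nothing reserved: skip tracing this call */
		*ts = trace_clock_local();
		return 1;
	}

	static void my_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops)
	{
		int size;
		u64 *ts = fgraph_retrieve_data(gops->idx, &size);

		if (ts)
			trace_printk("delta=%llu\n", trace_clock_local() - *ts);
	}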
417 * @offset: The offset into @t->ret_stack to find the ret_stack entry
418 * @frame_offset: Where to place the offset into @t->ret_stack of that entry
420 * Returns a pointer to the previous ret_stack below @offset or NULL
427 * ret_stack = get_ret_stack(task, offset, &offset);
428 * } while (ret_stack);
430 * Will iterate through all the ret_stack entries from curr_ret_stack
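Filled out into a complete walker (which would have to live in fgraph.c, since get_ret_stack() is a local helper there), the iteration pattern from that comment might look like the sketch below; the printing is purely illustrative:

	/* Sketch: walk every frame on @task's shadow stack, newest first. */
	static void dump_ret_stack(struct task_struct *task)
	{
		struct ftrace_ret_stack *ret_stack;
		int offset = task->curr_ret_stack;

		do {
			ret_stack = get_ret_stack(task, offset, &offset);
			if (ret_stack)
				printk("%ps called from %ps\n",
				       (void *)ret_stack->func, (void *)ret_stack->ret);
		} while (ret_stack);
	}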
466 struct ftrace_ret_stack *ret_stack = NULL;
476 ret_stack = get_ret_stack(current, offset, &next_offset);
477 if (!ret_stack || --depth < 0)
482 if (!ret_stack)
566 struct ftrace_ret_stack *ret_stack;
573 if (!current->ret_stack)
582 * We must make sure the ret_stack is tested before we read
597 ret_stack = RET_STACK(current, offset);
601 current->ret_stack[offset] = val;
602 ret_stack->ret = ret;
605 * or an offset where to find the next ret_stack. Even though the
607 * offset to find the ret_stack before we increment the stack pointer.
625 current->ret_stack[offset] = val;
627 ret_stack->ret = ret;
628 ret_stack->func = func;
630 ret_stack->fp = frame_pointer;
632 ret_stack->retp = retp;
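The push path above fills the fields of struct ftrace_ret_stack one at a time. For orientation, a sketch of the structure reduced to the members this listing actually references; the real definition has additional, config-dependent fields:

	/* Sketch: only the members referenced in this listing. */
	struct ftrace_ret_stack {
		unsigned long	ret;	/* original return address */
		unsigned long	func;	/* address of the traced function */
		unsigned long	fp;	/* frame pointer, used for the fp sanity test */
		unsigned long	*retp;	/* where the return address sits on the real stack */
	};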
725 struct ftrace_ret_stack *ret_stack;
727 ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);
729 if (unlikely(!ret_stack)) {
731 WARN(1, "Bad function graph ret_stack pointer: %d",
753 if (unlikely(ret_stack->fp != frame_pointer)) {
757 ret_stack->fp,
759 (void *)ret_stack->func,
760 ret_stack->ret);
767 *ret = ret_stack->ret;
768 trace->func = ret_stack->func;
778 return ret_stack;
813 struct ftrace_ret_stack *ret_stack;
820 ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);
822 if (unlikely(!ret_stack)) {
857 * ret_stack structure, we need to make sure the update of
891 * will return the last saved ret_stack entry. If it is greater than
892 * zero, it will return the corresponding ret_stack for the depth
898 struct ftrace_ret_stack *ret_stack = NULL;
905 ret_stack = get_ret_stack(task, offset, &offset);
906 } while (ret_stack && --idx >= 0);
908 return ret_stack;
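ftrace_graph_get_ret_stack() exposes the same walk by depth rather than by raw offset: index 0 returns the last saved entry, larger values go further down. A hedged usage sketch (the prototype is assumed to be the task-plus-index form used here):

	/* Sketch: return the traced function @depth entries below the newest one. */
	static unsigned long nth_traced_func(struct task_struct *task, int depth)
	{
		struct ftrace_ret_stack *ret_stack;

		ret_stack = ftrace_graph_get_ret_stack(task, depth);
		return ret_stack ? ret_stack->func : 0;
	}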
921 struct ftrace_ret_stack *ret_stack = NULL;
928 ret_stack = get_ret_stack(task, offset, &offset);
929 } while (ret_stack && ret_stack->ret == return_handler);
931 return ret_stack ? ret_stack->ret : 0;
957 struct ftrace_ret_stack *ret_stack;
969 ret_stack = get_ret_stack(task, i, &i);
970 if (!ret_stack)
974 * the ret_stack, which records "return_to_handler" as the return
980 if (ret_stack->retp == retp &&
981 ret_stack->ret != return_handler) {
983 return ret_stack->ret;
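ftrace_graph_ret_addr() exists so a stack unwinder can translate the return_to_handler trampoline address it finds on the real stack back into the original return address. A hedged sketch of that use, assuming the (task, idx, ret, retp) prototype referenced here:

	/* Sketch: print one unwound frame; @graph_idx carries the shadow-stack
	 * position across successive frames of the same unwind. */
	static void show_one_frame(struct task_struct *task, int *graph_idx,
				   unsigned long addr, unsigned long *loc)
	{
		/* Returns @addr unchanged unless it points at return_to_handler. */
		addr = ftrace_graph_ret_addr(task, graph_idx, addr, loc);
		printk("%pS\n", (void *)addr);
	}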
1058 if (t->ret_stack == NULL) {
1065 t->ret_stack = ret_stack_list[start++];
1106 graph_init_task(struct task_struct *t, unsigned long *ret_stack)
1109 ret_stack_init_task_vars(ret_stack);
1113 /* make curr_ret_stack visible before we add the ret_stack */
1115 t->ret_stack = ret_stack;
1130 if (t->ret_stack)
1131 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
1134 unsigned long *ret_stack;
1139 ret_stack = per_cpu(idle_ret_stack, cpu);
1140 if (!ret_stack) {
1141 ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1142 if (!ret_stack)
1144 per_cpu(idle_ret_stack, cpu) = ret_stack;
1146 graph_init_task(t, ret_stack);
1153 /* Make sure we do not use the parent ret_stack */
1154 t->ret_stack = NULL;
1159 unsigned long *ret_stack;
1164 ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1165 if (!ret_stack)
1167 graph_init_task(t, ret_stack);
1173 unsigned long *ret_stack = t->ret_stack;
1175 t->ret_stack = NULL;
1179 if (ret_stack) {
1182 kmem_cache_free(fgraph_stack_cachep, ret_stack);
1238 /* The cpu_boot init_task->ret_stack will never be freed */
1240 if (!idle_task(cpu)->ret_stack)
1265 if (idle_task(cpu)->ret_stack)
1271 if (t->ret_stack)
1313 /* The cpu_boot init_task->ret_stack will never be freed */
1316 if (!idle_task(cpu)->ret_stack)