// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

/* Flags that do not get reset */
#define FTRACE_NOCLEAR_FLAGS	(FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
				 FTRACE_FL_MODIFIED)

#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), \
	.subop_list		= LIST_HEAD_INIT(opsname.subop_list),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int __maybe_unused last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL || tr->function_no_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;
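
/*
 * A minimal sketch of how a callback plugs into this machinery. The names
 * my_callback and my_ops are hypothetical; the types and
 * register_ftrace_function() are the real API:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		(runs for every traced function while registered)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 * register_ftrace_function(&my_ops) links my_ops into ftrace_ops_list, and
 * update_ftrace_function() below picks the fastest way to invoke it.
 */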

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = (struct ftrace_ops __rcu *)&ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

/* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
/*
 * Stub used to invoke the list ops without requiring a separate trampoline.
 */
const struct ftrace_ops ftrace_list_ops = {
	.func	= ftrace_ops_list_func,
	.flags	= FTRACE_OPS_FL_STUB,
};

static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op,
				struct ftrace_regs *fregs)
{
	/* do nothing */
}

/*
 * Stub used when a call site is disabled. May be called transiently by threads
 * which have made it into ftrace_caller but haven't yet recovered the ops at
 * the point the call site is disabled.
 */
const struct ftrace_ops ftrace_nop_ops = {
	.func	= ftrace_ops_nop_func,
	.flags	= FTRACE_OPS_FL_STUB,
};
#endif

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		INIT_LIST_HEAD(&ops->subop_list);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/* Call this function for when a callback filters on set_ftrace_pid */
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, fregs);
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic or RCU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		rcu_assign_pointer(*list, &ftrace_list_end);
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);
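
/*
 * A sketch of the ordering that add_ftrace_ops() relies on (the CPUs and
 * timeline here are illustrative, not taken from a real trace):
 *
 *	CPU 0 (writer, under ftrace_lock)	CPU 1 (reader, no locks)
 *	---------------------------------	------------------------
 *	rcu_assign_pointer(ops->next, ...)
 *	  (barrier implied)
 *	rcu_assign_pointer(*list, ops)		op = ftrace_ops_list;
 *						op->next is already valid
 *
 * The second rcu_assign_pointer() publishes @ops only after ops->next is
 * set, so a reader walking the list can never fall off the end.
 */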

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it can only be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	fgraph_update_pid_func();

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
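
/*
 * Worked example of the sizing above, assuming a 64-bit kernel with 4K
 * pages and CONFIG_FUNCTION_GRAPH_TRACER enabled: the page header (next +
 * index) is 16 bytes and sizeof(struct ftrace_profile) is 48 bytes, so
 * PROFILES_PER_PAGE = (4096 - 16) / 48 = 85 records per page. Without the
 * graph time fields it is (4096 - 16) / 32 = 127.
 */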

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare against the hit count */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	guard(mutex)(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0))
		return -EBUSY;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		return 0;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample variance (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Compute the sample variance from the running sums:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide by only 1000 for the ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		stddev = div64_ul(stddev,
				  rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');

	return 0;
}
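
/*
 * Worked example of the s^2 computation above, with made-up numbers (in
 * microseconds for readability; the code operates on nanoseconds): four
 * calls taking 10, 12, 14 and 16us give time = 52 and time_squared = 696,
 * so s^2 = (4 * 696 - 52 * 52) / (4 * 3) = 80 / 12 = ~6.7 us^2, which
 * matches the textbook sample variance of those four samples.
 */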

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
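
/*
 * Rough sizing example for the allocation above (using the same 4K-page,
 * function-graph assumptions as the PROFILES_PER_PAGE example): 20000
 * functions at 85 records per page needs DIV_ROUND_UP(20000, 85) = 236
 * pages, i.e. about 944 KiB of profile records per possible CPU.
 */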

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
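
/*
 * The stat->disabled counter above is effectively a per-CPU reentrancy
 * guard. Example (hypothetical timeline): the profiler callback is in the
 * middle of ftrace_profile_alloc() when an NMI fires and its handler ends
 * up here too; the NMI's atomic_inc_return() sees 2 and bails out, so the
 * half-updated page list is never touched from the nested context.
 */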

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;

	if (!ftrace_profile_enabled)
		return;

	guard(preempt_notrace)();

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		return;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			return;
	}

	rec->counter++;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

struct profile_fgraph_data {
	unsigned long long		calltime;
	unsigned long long		subtime;
	unsigned long long		sleeptime;
};

static int profile_graph_entry(struct ftrace_graph_ent *trace,
			       struct fgraph_ops *gops,
			       struct ftrace_regs *fregs)
{
	struct profile_fgraph_data *profile_data;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	profile_data = fgraph_reserve_data(gops->idx, sizeof(*profile_data));
	if (!profile_data)
		return 0;

	profile_data->subtime = 0;
	profile_data->sleeptime = current->ftrace_sleeptime;
	profile_data->calltime = trace_clock_local();

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace,
				 struct fgraph_ops *gops,
				 struct ftrace_regs *fregs)
{
	struct profile_fgraph_data *profile_data;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	unsigned long long rettime = trace_clock_local();
	struct ftrace_profile *rec;
	int size;

	guard(preempt_notrace)();

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		return;

	profile_data = fgraph_retrieve_data(gops->idx, &size);

	/* If the calltime was zeroed, ignore it */
	if (!profile_data || !profile_data->calltime)
		return;

	calltime = rettime - profile_data->calltime;

	if (!fgraph_sleep_time) {
		if (current->ftrace_sleeptime)
			calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
	}

	if (!fgraph_graph_time) {
		struct profile_fgraph_data *parent_data;

		/* Append this call time to the parent time to subtract */
		parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);
		if (parent_data)
			parent_data->subtime += calltime;

		if (profile_data->subtime && profile_data->subtime < calltime)
			calltime -= profile_data->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}
}
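
/*
 * Example of the subtime accounting above (made-up durations): with
 * graph-time disabled, a parent that runs for 100us total and calls one
 * child that runs for 30us sees the child's return handler add 30us to
 * the parent's subtime, so the parent records 100 - 30 = 70us of its own
 * execution rather than the inclusive time.
 */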

static struct fgraph_ops fprofiler_ops = {
	.ops = {
		.flags = FTRACE_OPS_FL_INITIALIZED,
		INIT_OPS_HASH(fprofiler_ops.ops)
	},
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	guard(mutex)(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0)
				return ret;

			ret = register_ftrace_profiler();
			if (ret < 0)
				return ret;
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent; if something
			 * goes wrong, we still do not free the memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	trace_create_file("function_profile_enabled",
			  TRACE_MODE_WRITE, d_tracer, NULL,
			  &ftrace_profile_fops);
}
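
/*
 * Typical use of the files created above, from user space (assuming
 * tracefs is mounted at the usual /sys/kernel/tracing):
 *
 *	# echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	# cat /sys/kernel/tracing/trace_stat/function0
 *
 * The second command dumps the per-function hit counts (and timings with
 * CONFIG_FUNCTION_GRAPH_TRACER) collected on CPU 0, rendered by the
 * function_stat_* callbacks registered through struct tracer_stat.
 */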

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns: the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static struct ftrace_func_entry *
add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return entry;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

/**
 * ftrace_free_filter - remove all filters for an ftrace_ops
 * @ops: the ops to remove the filters from
 */
void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}
EXPORT_SYMBOL_GPL(ftrace_free_filter);

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

/* Used to save filters on functions for modules not loaded yet */
static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	INIT_LIST_HEAD(&ftrace_mod->list);
	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			if (add_hash_entry(new_hash, entry->ip) == NULL)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops);
static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

/*
 * Allocate a new hash, then remove the entries from @src and move them
 * over to it. On success, @src will be empty and should be freed.
 */
static struct ftrace_hash *__move_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Use around half the size (max bit of it), but
	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}
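
/*
 * Sizing example for __move_hash() (illustrative numbers): moving a hash
 * with 300 entries gives bits = fls(300 / 2) = fls(150) = 8, so the new
 * hash gets 1 << 8 = 256 buckets; the FTRACE_HASH_MAX_BITS cap keeps any
 * hash at or below 1 << 12 = 4096 buckets.
 */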

/* Move the @src entries to a newly allocated hash */
static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return __move_hash(src, size);
}

/**
 * ftrace_hash_move - move a new hash to a filter and do updates
 * @ops: The ops with the hash that @dst points to
 * @enable: True if for the filter hash, false for the notrace hash
 * @dst: Points to the @ops hash that should be updated
 * @src: The hash to update @dst with
 *
 * This is called when an ftrace_ops hash is being updated and the
 * kernel needs to reflect this. Note, this only updates the kernel
 * function callbacks if the @ops is enabled (not to be confused with
 * @enable above). If the @ops is enabled, its hash determines what
 * callbacks get called. This function gets called when the @ops hash
 * is updated and it requires new callbacks.
 *
 * On success the elements of @src are moved to @dst, and @dst is updated
 * properly, as well as the functions determined by the @ops hashes
 * are now calling the @ops callback function.
 *
 * Regardless of the return value, @src should be freed with free_ftrace_hash().
 */
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}
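
/*
 * The possible outcomes of hash_contains_ip(), spelled out (F is the
 * filter hash, N the notrace hash):
 *
 *	F empty,     N empty		-> match (trace everything)
 *	ip in F,     ip not in N	-> match
 *	ip in F,     ip in N		-> no match (notrace wins)
 *	ip not in F, F not empty	-> no match
 */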

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (pg->index == 0 ||
		    end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns: rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;
	unsigned long ip = 0;

	rcu_read_lock();
	rec = lookup_rec(start, end);
	if (rec)
		ip = rec->ip;
	rcu_read_unlock();

	return ip;
}

/**
 * ftrace_location - return the ftrace location
 * @ip: the instruction pointer to check
 *
 * Returns:
 * * If @ip matches the ftrace location, return @ip.
 * * If @ip matches sym+0, return sym's ftrace location.
 * * Otherwise, return 0.
 */
unsigned long ftrace_location(unsigned long ip)
{
	unsigned long loc;
	unsigned long offset;
	unsigned long size;

	loc = ftrace_location_range(ip, ip);
	if (!loc) {
		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
			return 0;

		/* map sym+0 to __fentry__ */
		if (!offset)
			loc = ftrace_location_range(ip, ip + size - 1);
	}
	return loc;
}
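
/*
 * Example of the sym+0 mapping above: on architectures where the
 * __fentry__ call is the first instruction of a function, looking up the
 * symbol's start address hits the record directly. Where the patch site
 * sits a few bytes into the function, the kallsyms size/offset lookup
 * lets sym+0 still resolve to that function's ftrace location.
 */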

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns: 1 if @start and @end contain an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool skip_record(struct dyn_ftrace *rec)
{
	/*
	 * At boot up, weak functions are set to disable. Function tracing
	 * can be enabled before they are, and they still need to be disabled now.
	 * If the record is disabled, still continue if it is marked as already
	 * enabled (this is needed to keep the accounting working).
	 */
	return rec->flags & FTRACE_FL_DISABLED &&
		!(rec->flags & FTRACE_FL_ENABLED);
}

/*
 * This is the main engine for the ftrace updates to the dyn_ftrace records.
 *
 * It will iterate through all the available ftrace functions
 * (the ones that ftrace can have callbacks to) and set the flags
 * in the associated dyn_ftrace records.
 *
 * @inc: If true, the functions associated to @ops are added to
 *       the dyn_ftrace records, otherwise they are removed.
 */
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *notrace_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * If the count is zero, we update all records.
	 * Otherwise we just update the items in the hash.
	 */
	hash = ops->func_hash->filter_hash;
	notrace_hash = ops->func_hash->notrace_hash;
	if (ftrace_hash_empty(hash))
		all = true;

	do_for_each_ftrace_rec(pg, rec) {
		int in_notrace_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (skip_record(rec))
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip);

			/*
			 * We want to match all functions that are in the hash but
			 * not in the other hash.
			 */
			if (in_hash && !in_notrace_hash)
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * The TRAMP needs to be set only if rec count
			 * is decremented to one, and the ops that is
			 * left has a trampoline. As TRAMP can only be
			 * enabled if there is only a single ops attached
			 * to it.
			 */
			if (ftrace_rec_count(rec) == 1 &&
			    ftrace_find_tramp_ops_any_other(rec, ops))
				rec->flags |= FTRACE_FL_TRAMP;
			else
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}

		/*
		 * If the rec has a single associated ops, and ops->func can be
		 * called directly, allow the call site to call via the ops.
		 */
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
		    ftrace_rec_count(rec) == 1 &&
		    ftrace_ops_get_func(ops) == ops->func)
			rec->flags |= FTRACE_FL_CALL_OPS;
		else
			rec->flags &= ~FTRACE_FL_CALL_OPS;

		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}
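
/*
 * Example of the reference counting above (hypothetical): if two separate
 * ftrace_ops filter on the same function, its dyn_ftrace record ends up
 * with a ref count of 2 in the low bits of rec->flags. The call site then
 * has to go through the shared list function, so FTRACE_FL_TRAMP is only
 * left set while exactly one ops (with its own trampoline) remains.
 */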

/*
 * This is called when an ops is removed from tracing. It will decrement
 * the counters of the dyn_ftrace records for all the functions that
 * the @ops attached to.
 */
static bool ftrace_hash_rec_disable(struct ftrace_ops *ops)
{
	return __ftrace_hash_rec_update(ops, false);
}

/*
 * This is called when an ops is added to tracing. It will increment
 * the counters of the dyn_ftrace records for all the functions that
 * the @ops attached to.
 */
static bool ftrace_hash_rec_enable(struct ftrace_ops *ops)
{
	return __ftrace_hash_rec_update(ops, true);
}

/*
 * This function will update what functions @ops traces when its filter
 * changes.
 *
 * The @inc states if the @ops callbacks are going to be added or removed.
 * When one of the @ops hashes is updated to a "new_hash" the dyn_ftrace
 * records are updated via:
 *
 *	ftrace_hash_rec_disable_modify(ops);
 *	ops->hash = new_hash
 *	ftrace_hash_rec_enable_modify(ops);
 *
 * Where the @ops is removed from all the records it is tracing using
 * its old hash. The @ops hash is updated to the new hash, and then
 * the @ops is added back to the records so that it is tracing all
 * the new functions.
 */
static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, bool inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops)
{
	ftrace_hash_rec_update_modify(ops, false);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops)
{
	ftrace_hash_rec_update_modify(ops, true);
}

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update was needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 * - If the hash is EMPTY_HASH, it hits nothing
 * - Anything else hits the recs which match the hash entries.
 *
 * A DIRECT ops does not have the IPMODIFY flag, but we still need to check it
 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
 * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
 * the return value to the caller and eventually to the owner of the DIRECT
 * ops.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;
	bool is_ipmodify, is_direct;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
	is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;

	/* neither IPMODIFY nor DIRECT, skip */
	if (!is_ipmodify && !is_direct)
		return 0;

	if (WARN_ON_ONCE(is_ipmodify && is_direct))
		return 0;

	/*
	 * Since the IPMODIFY and DIRECT are very address sensitive
	 * actions, we do not allow ftrace_ops to set all functions to new
	 * hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			if (rec->flags & FTRACE_FL_IPMODIFY) {
				int ret;

				/* Cannot have two ipmodify on same rec */
				if (is_ipmodify)
					goto rollback;

				FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);

				/*
				 * Another ops with IPMODIFY is already
				 * attached. We are now attaching a direct
				 * ops. Run SHARE_IPMODIFY_SELF, to check
				 * whether sharing is supported.
				 */
				if (!ops->ops_func)
					return -EBUSY;
				ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
				if (ret)
					return ret;
			} else if (is_ipmodify) {
				rec->flags |= FTRACE_FL_IPMODIFY;
			}
		} else if (is_ipmodify) {
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		}
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			return -EBUSY;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}
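
/*
 * A concrete (illustrative) instance of the rules enforced above: a
 * livepatch-style ops with IPMODIFY already rewrites the ip of some
 * function, and a DIRECT ops (e.g. a BPF trampoline) then tries to attach
 * to the same function. The direct ops has no IPMODIFY flag, so instead
 * of failing with -EBUSY its ops_func() is asked with
 * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF whether it can follow the
 * modified ip; the attach only fails if that callback refuses.
 */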
__ftrace_hash_update_ipmodify(ops, old_hash, new_hash); 2112 } 2113 2114 static void print_ip_ins(const char *fmt, const unsigned char *p) 2115 { 2116 char ins[MCOUNT_INSN_SIZE]; 2117 2118 if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) { 2119 printk(KERN_CONT "%s[FAULT] %px\n", fmt, p); 2120 return; 2121 } 2122 2123 printk(KERN_CONT "%s", fmt); 2124 pr_cont("%*phC", MCOUNT_INSN_SIZE, ins); 2125 } 2126 2127 enum ftrace_bug_type ftrace_bug_type; 2128 const void *ftrace_expected; 2129 2130 static void print_bug_type(void) 2131 { 2132 switch (ftrace_bug_type) { 2133 case FTRACE_BUG_UNKNOWN: 2134 break; 2135 case FTRACE_BUG_INIT: 2136 pr_info("Initializing ftrace call sites\n"); 2137 break; 2138 case FTRACE_BUG_NOP: 2139 pr_info("Setting ftrace call site to NOP\n"); 2140 break; 2141 case FTRACE_BUG_CALL: 2142 pr_info("Setting ftrace call site to call ftrace function\n"); 2143 break; 2144 case FTRACE_BUG_UPDATE: 2145 pr_info("Updating ftrace call site to call a different ftrace function\n"); 2146 break; 2147 } 2148 } 2149 2150 /** 2151 * ftrace_bug - report and shutdown function tracer 2152 * @failed: The failed type (EFAULT, EINVAL, EPERM) 2153 * @rec: The record that failed 2154 * 2155 * The arch code that enables or disables the function tracing 2156 * can call ftrace_bug() when it has detected a problem in 2157 * modifying the code. @failed should be one of either: 2158 * EFAULT - if the problem happens on reading the @ip address 2159 * EINVAL - if what is read at @ip is not what was expected 2160 * EPERM - if the problem happens on writing to the @ip address 2161 */ 2162 void ftrace_bug(int failed, struct dyn_ftrace *rec) 2163 { 2164 unsigned long ip = rec ? rec->ip : 0; 2165 2166 pr_info("------------[ ftrace bug ]------------\n"); 2167 2168 switch (failed) { 2169 case -EFAULT: 2170 pr_info("ftrace faulted on modifying "); 2171 print_ip_sym(KERN_INFO, ip); 2172 break; 2173 case -EINVAL: 2174 pr_info("ftrace failed to modify "); 2175 print_ip_sym(KERN_INFO, ip); 2176 print_ip_ins(" actual: ", (unsigned char *)ip); 2177 pr_cont("\n"); 2178 if (ftrace_expected) { 2179 print_ip_ins(" expected: ", ftrace_expected); 2180 pr_cont("\n"); 2181 } 2182 break; 2183 case -EPERM: 2184 pr_info("ftrace faulted on writing "); 2185 print_ip_sym(KERN_INFO, ip); 2186 break; 2187 default: 2188 pr_info("ftrace faulted on unknown error "); 2189 print_ip_sym(KERN_INFO, ip); 2190 } 2191 print_bug_type(); 2192 if (rec) { 2193 struct ftrace_ops *ops = NULL; 2194 2195 pr_info("ftrace record flags: %lx\n", rec->flags); 2196 pr_cont(" (%ld)%s%s", ftrace_rec_count(rec), 2197 rec->flags & FTRACE_FL_REGS ? " R" : " ", 2198 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " "); 2199 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2200 ops = ftrace_find_tramp_ops_any(rec); 2201 if (ops) { 2202 do { 2203 pr_cont("\ttramp: %pS (%pS)", 2204 (void *)ops->trampoline, 2205 (void *)ops->func); 2206 ops = ftrace_find_tramp_ops_next(rec, ops); 2207 } while (ops); 2208 } else 2209 pr_cont("\ttramp: ERROR!"); 2210 2211 } 2212 ip = ftrace_get_addr_curr(rec); 2213 pr_cont("\n expected tramp: %lx\n", ip); 2214 } 2215 2216 FTRACE_WARN_ON_ONCE(1); 2217 } 2218 2219 static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) 2220 { 2221 unsigned long flag = 0UL; 2222 2223 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2224 2225 if (skip_record(rec)) 2226 return FTRACE_UPDATE_IGNORE; 2227 2228 /* 2229 * If we are updating calls: 2230 * 2231 * If the record has a ref count, then we need to enable it 2232 * because someone is using it. 
2233 * 2234 * Otherwise we make sure it's disabled. 2235 * 2236 * If we are disabling calls, then disable all records that 2237 * are enabled. 2238 */ 2239 if (enable && ftrace_rec_count(rec)) 2240 flag = FTRACE_FL_ENABLED; 2241 2242 /* 2243 * If enabling and the REGS flag does not match the REGS_EN, or 2244 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore 2245 * this record. Set flags to fail the compare against ENABLED. 2246 * Same for direct calls. 2247 */ 2248 if (flag) { 2249 if (!(rec->flags & FTRACE_FL_REGS) != 2250 !(rec->flags & FTRACE_FL_REGS_EN)) 2251 flag |= FTRACE_FL_REGS; 2252 2253 if (!(rec->flags & FTRACE_FL_TRAMP) != 2254 !(rec->flags & FTRACE_FL_TRAMP_EN)) 2255 flag |= FTRACE_FL_TRAMP; 2256 2257 /* 2258 * Direct calls are special, as count matters. 2259 * We must test the record for direct if the 2260 * DIRECT and DIRECT_EN flags do not match, but only 2261 * when the count is 1. That's because, if the 2262 * count is something other than one, we do not 2263 * want the direct enabled (it will be done via the 2264 * direct helper). But if DIRECT_EN is set, and 2265 * the count is not one, we need to clear it. 2266 * 2267 */ 2268 if (ftrace_rec_count(rec) == 1) { 2269 if (!(rec->flags & FTRACE_FL_DIRECT) != 2270 !(rec->flags & FTRACE_FL_DIRECT_EN)) 2271 flag |= FTRACE_FL_DIRECT; 2272 } else if (rec->flags & FTRACE_FL_DIRECT_EN) { 2273 flag |= FTRACE_FL_DIRECT; 2274 } 2275 2276 /* 2277 * Ops calls are special, as count matters. 2278 * As with direct calls, they must only be enabled when count 2279 * is one, otherwise they'll be handled via the list ops. 2280 */ 2281 if (ftrace_rec_count(rec) == 1) { 2282 if (!(rec->flags & FTRACE_FL_CALL_OPS) != 2283 !(rec->flags & FTRACE_FL_CALL_OPS_EN)) 2284 flag |= FTRACE_FL_CALL_OPS; 2285 } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) { 2286 flag |= FTRACE_FL_CALL_OPS; 2287 } 2288 } 2289 2290 /* If the state of this record hasn't changed, then do nothing */ 2291 if ((rec->flags & FTRACE_FL_ENABLED) == flag) 2292 return FTRACE_UPDATE_IGNORE; 2293 2294 if (flag) { 2295 /* Save off if rec is being enabled (for return value) */ 2296 flag ^= rec->flags & FTRACE_FL_ENABLED; 2297 2298 if (update) { 2299 rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED; 2300 if (flag & FTRACE_FL_REGS) { 2301 if (rec->flags & FTRACE_FL_REGS) 2302 rec->flags |= FTRACE_FL_REGS_EN; 2303 else 2304 rec->flags &= ~FTRACE_FL_REGS_EN; 2305 } 2306 if (flag & FTRACE_FL_TRAMP) { 2307 if (rec->flags & FTRACE_FL_TRAMP) 2308 rec->flags |= FTRACE_FL_TRAMP_EN; 2309 else 2310 rec->flags &= ~FTRACE_FL_TRAMP_EN; 2311 } 2312 2313 /* Keep track of anything that modifies the function */ 2314 if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY)) 2315 rec->flags |= FTRACE_FL_MODIFIED; 2316 2317 if (flag & FTRACE_FL_DIRECT) { 2318 /* 2319 * If there's only one user (direct_ops helper) 2320 * then we can call the direct function 2321 * directly (no ftrace trampoline). 2322 */ 2323 if (ftrace_rec_count(rec) == 1) { 2324 if (rec->flags & FTRACE_FL_DIRECT) 2325 rec->flags |= FTRACE_FL_DIRECT_EN; 2326 else 2327 rec->flags &= ~FTRACE_FL_DIRECT_EN; 2328 } else { 2329 /* 2330 * Can only call directly if there's 2331 * only one callback to the function.
2332 */ 2333 rec->flags &= ~FTRACE_FL_DIRECT_EN; 2334 } 2335 } 2336 2337 if (flag & FTRACE_FL_CALL_OPS) { 2338 if (ftrace_rec_count(rec) == 1) { 2339 if (rec->flags & FTRACE_FL_CALL_OPS) 2340 rec->flags |= FTRACE_FL_CALL_OPS_EN; 2341 else 2342 rec->flags &= ~FTRACE_FL_CALL_OPS_EN; 2343 } else { 2344 /* 2345 * Can only call directly if there's 2346 * only one set of associated ops. 2347 */ 2348 rec->flags &= ~FTRACE_FL_CALL_OPS_EN; 2349 } 2350 } 2351 } 2352 2353 /* 2354 * If this record is being updated from a nop, then 2355 * return UPDATE_MAKE_CALL. 2356 * Otherwise, 2357 * return UPDATE_MODIFY_CALL to tell the caller to convert 2358 * from the save regs, to a non-save regs function or 2359 * vice versa, or from a trampoline call. 2360 */ 2361 if (flag & FTRACE_FL_ENABLED) { 2362 ftrace_bug_type = FTRACE_BUG_CALL; 2363 return FTRACE_UPDATE_MAKE_CALL; 2364 } 2365 2366 ftrace_bug_type = FTRACE_BUG_UPDATE; 2367 return FTRACE_UPDATE_MODIFY_CALL; 2368 } 2369 2370 if (update) { 2371 /* If there's no more users, clear all flags */ 2372 if (!ftrace_rec_count(rec)) 2373 rec->flags &= FTRACE_NOCLEAR_FLAGS; 2374 else 2375 /* 2376 * Just disable the record, but keep the ops TRAMP 2377 * and REGS states. The _EN flags must be disabled though. 2378 */ 2379 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | 2380 FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN | 2381 FTRACE_FL_CALL_OPS_EN); 2382 } 2383 2384 ftrace_bug_type = FTRACE_BUG_NOP; 2385 return FTRACE_UPDATE_MAKE_NOP; 2386 } 2387 2388 /** 2389 * ftrace_update_record - set a record that now is tracing or not 2390 * @rec: the record to update 2391 * @enable: set to true if the record is tracing, false to force disable 2392 * 2393 * The records that represent all functions that can be traced need 2394 * to be updated when tracing has been enabled. 2395 */ 2396 int ftrace_update_record(struct dyn_ftrace *rec, bool enable) 2397 { 2398 return ftrace_check_record(rec, enable, true); 2399 } 2400 2401 /** 2402 * ftrace_test_record - check if the record has been enabled or not 2403 * @rec: the record to test 2404 * @enable: set to true to check if enabled, false if it is disabled 2405 * 2406 * The arch code may need to test if a record is already set to 2407 * tracing to determine how to modify the function code that it 2408 * represents. 
2409 */ 2410 int ftrace_test_record(struct dyn_ftrace *rec, bool enable) 2411 { 2412 return ftrace_check_record(rec, enable, false); 2413 } 2414 2415 static struct ftrace_ops * 2416 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) 2417 { 2418 struct ftrace_ops *op; 2419 unsigned long ip = rec->ip; 2420 2421 do_for_each_ftrace_op(op, ftrace_ops_list) { 2422 2423 if (!op->trampoline) 2424 continue; 2425 2426 if (hash_contains_ip(ip, op->func_hash)) 2427 return op; 2428 } while_for_each_ftrace_op(op); 2429 2430 return NULL; 2431 } 2432 2433 static struct ftrace_ops * 2434 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude) 2435 { 2436 struct ftrace_ops *op; 2437 unsigned long ip = rec->ip; 2438 2439 do_for_each_ftrace_op(op, ftrace_ops_list) { 2440 2441 if (op == op_exclude || !op->trampoline) 2442 continue; 2443 2444 if (hash_contains_ip(ip, op->func_hash)) 2445 return op; 2446 } while_for_each_ftrace_op(op); 2447 2448 return NULL; 2449 } 2450 2451 static struct ftrace_ops * 2452 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, 2453 struct ftrace_ops *op) 2454 { 2455 unsigned long ip = rec->ip; 2456 2457 while_for_each_ftrace_op(op) { 2458 2459 if (!op->trampoline) 2460 continue; 2461 2462 if (hash_contains_ip(ip, op->func_hash)) 2463 return op; 2464 } 2465 2466 return NULL; 2467 } 2468 2469 static struct ftrace_ops * 2470 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) 2471 { 2472 struct ftrace_ops *op; 2473 unsigned long ip = rec->ip; 2474 2475 /* 2476 * Need to check removed ops first. 2477 * If they are being removed, and this rec has a tramp, 2478 * and this rec is in the ops list, then it would be the 2479 * one with the tramp. 2480 */ 2481 if (removed_ops) { 2482 if (hash_contains_ip(ip, &removed_ops->old_hash)) 2483 return removed_ops; 2484 } 2485 2486 /* 2487 * Need to find the current trampoline for a rec. 2488 * Now, a trampoline is only attached to a rec if there 2489 * was a single 'ops' attached to it. But this can be called 2490 * when we are adding another op to the rec or removing the 2491 * current one. Thus, if the op is being added, we can 2492 * ignore it because it hasn't attached itself to the rec 2493 * yet. 2494 * 2495 * If an ops is being modified (hooking to different functions) 2496 * then we don't care about the new functions that are being 2497 * added, just the old ones (that are probably being removed). 2498 * 2499 * If we are adding an ops to a function that already is using 2500 * a trampoline, it needs to be removed (trampolines are only 2501 * for single ops connected), then an ops that is not being 2502 * modified also needs to be checked. 2503 */ 2504 do_for_each_ftrace_op(op, ftrace_ops_list) { 2505 2506 if (!op->trampoline) 2507 continue; 2508 2509 /* 2510 * If the ops is being added, it hasn't gotten to 2511 * the point to be removed from this tree yet. 2512 */ 2513 if (op->flags & FTRACE_OPS_FL_ADDING) 2514 continue; 2515 2516 2517 /* 2518 * If the ops is being modified and is in the old 2519 * hash, then it is probably being removed from this 2520 * function. 2521 */ 2522 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && 2523 hash_contains_ip(ip, &op->old_hash)) 2524 return op; 2525 /* 2526 * If the ops is not being added or modified, and it's 2527 * in its normal filter hash, then this must be the one 2528 * we want! 
2529 */ 2530 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && 2531 hash_contains_ip(ip, op->func_hash)) 2532 return op; 2533 2534 } while_for_each_ftrace_op(op); 2535 2536 return NULL; 2537 } 2538 2539 static struct ftrace_ops * 2540 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) 2541 { 2542 struct ftrace_ops *op; 2543 unsigned long ip = rec->ip; 2544 2545 do_for_each_ftrace_op(op, ftrace_ops_list) { 2546 /* pass rec in as regs to have non-NULL val */ 2547 if (hash_contains_ip(ip, op->func_hash)) 2548 return op; 2549 } while_for_each_ftrace_op(op); 2550 2551 return NULL; 2552 } 2553 2554 struct ftrace_ops * 2555 ftrace_find_unique_ops(struct dyn_ftrace *rec) 2556 { 2557 struct ftrace_ops *op, *found = NULL; 2558 unsigned long ip = rec->ip; 2559 2560 do_for_each_ftrace_op(op, ftrace_ops_list) { 2561 2562 if (hash_contains_ip(ip, op->func_hash)) { 2563 if (found) 2564 return NULL; 2565 found = op; 2566 } 2567 2568 } while_for_each_ftrace_op(op); 2569 2570 return found; 2571 } 2572 2573 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 2574 /* Protected by rcu_tasks for reading, and direct_mutex for writing */ 2575 static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH; 2576 static DEFINE_MUTEX(direct_mutex); 2577 2578 /* 2579 * Search the direct_functions hash to see if the given instruction pointer 2580 * has a direct caller attached to it. 2581 */ 2582 unsigned long ftrace_find_rec_direct(unsigned long ip) 2583 { 2584 struct ftrace_func_entry *entry; 2585 2586 entry = __ftrace_lookup_ip(direct_functions, ip); 2587 if (!entry) 2588 return 0; 2589 2590 return entry->direct; 2591 } 2592 2593 static void call_direct_funcs(unsigned long ip, unsigned long pip, 2594 struct ftrace_ops *ops, struct ftrace_regs *fregs) 2595 { 2596 unsigned long addr = READ_ONCE(ops->direct_call); 2597 2598 if (!addr) 2599 return; 2600 2601 arch_ftrace_set_direct_caller(fregs, addr); 2602 } 2603 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 2604 2605 /** 2606 * ftrace_get_addr_new - Get the call address to set to 2607 * @rec: The ftrace record descriptor 2608 * 2609 * If the record has the FTRACE_FL_REGS set, that means that it 2610 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS 2611 * is not set, then it wants to convert to the normal callback. 2612 * 2613 * Returns: the address of the trampoline to set to 2614 */ 2615 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) 2616 { 2617 struct ftrace_ops *ops; 2618 unsigned long addr; 2619 2620 if ((rec->flags & FTRACE_FL_DIRECT) && 2621 (ftrace_rec_count(rec) == 1)) { 2622 addr = ftrace_find_rec_direct(rec->ip); 2623 if (addr) 2624 return addr; 2625 WARN_ON_ONCE(1); 2626 } 2627 2628 /* Trampolines take precedence over regs */ 2629 if (rec->flags & FTRACE_FL_TRAMP) { 2630 ops = ftrace_find_tramp_ops_new(rec); 2631 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { 2632 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", 2633 (void *)rec->ip, (void *)rec->ip, rec->flags); 2634 /* Ftrace is shutting down, return anything */ 2635 return (unsigned long)FTRACE_ADDR; 2636 } 2637 return ops->trampoline; 2638 } 2639 2640 if (rec->flags & FTRACE_FL_REGS) 2641 return (unsigned long)FTRACE_REGS_ADDR; 2642 else 2643 return (unsigned long)FTRACE_ADDR; 2644 } 2645 2646 /** 2647 * ftrace_get_addr_curr - Get the call address that is already there 2648 * @rec: The ftrace record descriptor 2649 * 2650 * The FTRACE_FL_REGS_EN is set when the record already points to 2651 * a function that saves all the regs. 
Basically the '_EN' version 2652 * represents the current state of the function. 2653 * 2654 * Returns: the address of the trampoline that is currently being called 2655 */ 2656 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) 2657 { 2658 struct ftrace_ops *ops; 2659 unsigned long addr; 2660 2661 /* Direct calls take precedence over trampolines */ 2662 if (rec->flags & FTRACE_FL_DIRECT_EN) { 2663 addr = ftrace_find_rec_direct(rec->ip); 2664 if (addr) 2665 return addr; 2666 WARN_ON_ONCE(1); 2667 } 2668 2669 /* Trampolines take precedence over regs */ 2670 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2671 ops = ftrace_find_tramp_ops_curr(rec); 2672 if (FTRACE_WARN_ON(!ops)) { 2673 pr_warn("Bad trampoline accounting at: %p (%pS)\n", 2674 (void *)rec->ip, (void *)rec->ip); 2675 /* Ftrace is shutting down, return anything */ 2676 return (unsigned long)FTRACE_ADDR; 2677 } 2678 return ops->trampoline; 2679 } 2680 2681 if (rec->flags & FTRACE_FL_REGS_EN) 2682 return (unsigned long)FTRACE_REGS_ADDR; 2683 else 2684 return (unsigned long)FTRACE_ADDR; 2685 } 2686 2687 static int 2688 __ftrace_replace_code(struct dyn_ftrace *rec, bool enable) 2689 { 2690 unsigned long ftrace_old_addr; 2691 unsigned long ftrace_addr; 2692 int ret; 2693 2694 ftrace_addr = ftrace_get_addr_new(rec); 2695 2696 /* This needs to be done before we call ftrace_update_record */ 2697 ftrace_old_addr = ftrace_get_addr_curr(rec); 2698 2699 ret = ftrace_update_record(rec, enable); 2700 2701 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2702 2703 switch (ret) { 2704 case FTRACE_UPDATE_IGNORE: 2705 return 0; 2706 2707 case FTRACE_UPDATE_MAKE_CALL: 2708 ftrace_bug_type = FTRACE_BUG_CALL; 2709 return ftrace_make_call(rec, ftrace_addr); 2710 2711 case FTRACE_UPDATE_MAKE_NOP: 2712 ftrace_bug_type = FTRACE_BUG_NOP; 2713 return ftrace_make_nop(NULL, rec, ftrace_old_addr); 2714 2715 case FTRACE_UPDATE_MODIFY_CALL: 2716 ftrace_bug_type = FTRACE_BUG_UPDATE; 2717 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 2718 } 2719 2720 return -1; /* unknown ftrace bug */ 2721 } 2722 2723 void __weak ftrace_replace_code(int mod_flags) 2724 { 2725 struct dyn_ftrace *rec; 2726 struct ftrace_page *pg; 2727 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; 2728 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; 2729 int failed; 2730 2731 if (unlikely(ftrace_disabled)) 2732 return; 2733 2734 do_for_each_ftrace_rec(pg, rec) { 2735 2736 if (skip_record(rec)) 2737 continue; 2738 2739 failed = __ftrace_replace_code(rec, enable); 2740 if (failed) { 2741 ftrace_bug(failed, rec); 2742 /* Stop processing */ 2743 return; 2744 } 2745 if (schedulable) 2746 cond_resched(); 2747 } while_for_each_ftrace_rec(); 2748 } 2749 2750 struct ftrace_rec_iter { 2751 struct ftrace_page *pg; 2752 int index; 2753 }; 2754 2755 /** 2756 * ftrace_rec_iter_start - start up iterating over traced functions 2757 * 2758 * Returns: an iterator handle that is used to iterate over all 2759 * the records that represent address locations where functions 2760 * are traced. 2761 * 2762 * May return NULL if no records are available. 2763 */ 2764 struct ftrace_rec_iter *ftrace_rec_iter_start(void) 2765 { 2766 /* 2767 * We only use a single iterator. 2768 * Protected by the ftrace_lock mutex. 
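 *
 * Typical use by arch code, combining the three iterator functions
 * below (a sketch):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		(patch the call site at rec->ip)
 *	}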
2769 */ 2770 static struct ftrace_rec_iter ftrace_rec_iter; 2771 struct ftrace_rec_iter *iter = &ftrace_rec_iter; 2772 2773 iter->pg = ftrace_pages_start; 2774 iter->index = 0; 2775 2776 /* Could have empty pages */ 2777 while (iter->pg && !iter->pg->index) 2778 iter->pg = iter->pg->next; 2779 2780 if (!iter->pg) 2781 return NULL; 2782 2783 return iter; 2784 } 2785 2786 /** 2787 * ftrace_rec_iter_next - get the next record to process. 2788 * @iter: The handle to the iterator. 2789 * 2790 * Returns: the next iterator after the given iterator @iter. 2791 */ 2792 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) 2793 { 2794 iter->index++; 2795 2796 if (iter->index >= iter->pg->index) { 2797 iter->pg = iter->pg->next; 2798 iter->index = 0; 2799 2800 /* Could have empty pages */ 2801 while (iter->pg && !iter->pg->index) 2802 iter->pg = iter->pg->next; 2803 } 2804 2805 if (!iter->pg) 2806 return NULL; 2807 2808 return iter; 2809 } 2810 2811 /** 2812 * ftrace_rec_iter_record - get the record at the iterator location 2813 * @iter: The current iterator location 2814 * 2815 * Returns: the record that the current @iter is at. 2816 */ 2817 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) 2818 { 2819 return &iter->pg->records[iter->index]; 2820 } 2821 2822 static int 2823 ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec) 2824 { 2825 int ret; 2826 2827 if (unlikely(ftrace_disabled)) 2828 return 0; 2829 2830 ret = ftrace_init_nop(mod, rec); 2831 if (ret) { 2832 ftrace_bug_type = FTRACE_BUG_INIT; 2833 ftrace_bug(ret, rec); 2834 return 0; 2835 } 2836 return 1; 2837 } 2838 2839 /* 2840 * archs can override this function if they must do something 2841 * before the modifying code is performed. 2842 */ 2843 void __weak ftrace_arch_code_modify_prepare(void) 2844 { 2845 } 2846 2847 /* 2848 * archs can override this function if they must do something 2849 * after the modifying code is performed. 2850 */ 2851 void __weak ftrace_arch_code_modify_post_process(void) 2852 { 2853 } 2854 2855 static int update_ftrace_func(ftrace_func_t func) 2856 { 2857 static ftrace_func_t save_func; 2858 2859 /* Avoid updating if it hasn't changed */ 2860 if (func == save_func) 2861 return 0; 2862 2863 save_func = func; 2864 2865 return ftrace_update_ftrace_func(func); 2866 } 2867 2868 void ftrace_modify_all_code(int command) 2869 { 2870 int update = command & FTRACE_UPDATE_TRACE_FUNC; 2871 int mod_flags = 0; 2872 int err = 0; 2873 2874 if (command & FTRACE_MAY_SLEEP) 2875 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; 2876 2877 /* 2878 * If the ftrace_caller calls a ftrace_ops func directly, 2879 * we need to make sure that it only traces functions it 2880 * expects to trace. When doing the switch of functions, 2881 * we need to update to the ftrace_ops_list_func first 2882 * before the transition between old and new calls are set, 2883 * as the ftrace_ops_list_func will check the ops hashes 2884 * to make sure the ops are having the right functions 2885 * traced. 
2886 */ 2887 if (update) { 2888 err = update_ftrace_func(ftrace_ops_list_func); 2889 if (FTRACE_WARN_ON(err)) 2890 return; 2891 } 2892 2893 if (command & FTRACE_UPDATE_CALLS) 2894 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL); 2895 else if (command & FTRACE_DISABLE_CALLS) 2896 ftrace_replace_code(mod_flags); 2897 2898 if (update && ftrace_trace_function != ftrace_ops_list_func) { 2899 function_trace_op = set_function_trace_op; 2900 smp_wmb(); 2901 /* If irqs are disabled, we are in stop machine */ 2902 if (!irqs_disabled()) 2903 smp_call_function(ftrace_sync_ipi, NULL, 1); 2904 err = update_ftrace_func(ftrace_trace_function); 2905 if (FTRACE_WARN_ON(err)) 2906 return; 2907 } 2908 2909 if (command & FTRACE_START_FUNC_RET) 2910 err = ftrace_enable_ftrace_graph_caller(); 2911 else if (command & FTRACE_STOP_FUNC_RET) 2912 err = ftrace_disable_ftrace_graph_caller(); 2913 FTRACE_WARN_ON(err); 2914 } 2915 2916 static int __ftrace_modify_code(void *data) 2917 { 2918 int *command = data; 2919 2920 ftrace_modify_all_code(*command); 2921 2922 return 0; 2923 } 2924 2925 /** 2926 * ftrace_run_stop_machine - go back to the stop machine method 2927 * @command: The command to tell ftrace what to do 2928 * 2929 * If an arch needs to fall back to the stop machine method, the 2930 * it can call this function. 2931 */ 2932 void ftrace_run_stop_machine(int command) 2933 { 2934 stop_machine(__ftrace_modify_code, &command, NULL); 2935 } 2936 2937 /** 2938 * arch_ftrace_update_code - modify the code to trace or not trace 2939 * @command: The command that needs to be done 2940 * 2941 * Archs can override this function if it does not need to 2942 * run stop_machine() to modify code. 2943 */ 2944 void __weak arch_ftrace_update_code(int command) 2945 { 2946 ftrace_run_stop_machine(command); 2947 } 2948 2949 static void ftrace_run_update_code(int command) 2950 { 2951 ftrace_arch_code_modify_prepare(); 2952 2953 /* 2954 * By default we use stop_machine() to modify the code. 2955 * But archs can do what ever they want as long as it 2956 * is safe. The stop_machine() is the safest, but also 2957 * produces the most overhead. 
2958 */ 2959 arch_ftrace_update_code(command); 2960 2961 ftrace_arch_code_modify_post_process(); 2962 } 2963 2964 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, 2965 struct ftrace_ops_hash *old_hash) 2966 { 2967 ops->flags |= FTRACE_OPS_FL_MODIFYING; 2968 ops->old_hash.filter_hash = old_hash->filter_hash; 2969 ops->old_hash.notrace_hash = old_hash->notrace_hash; 2970 ftrace_run_update_code(command); 2971 ops->old_hash.filter_hash = NULL; 2972 ops->old_hash.notrace_hash = NULL; 2973 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; 2974 } 2975 2976 static ftrace_func_t saved_ftrace_func; 2977 static int ftrace_start_up; 2978 2979 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) 2980 { 2981 } 2982 2983 /* List of trace_ops that have allocated trampolines */ 2984 static LIST_HEAD(ftrace_ops_trampoline_list); 2985 2986 static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops) 2987 { 2988 lockdep_assert_held(&ftrace_lock); 2989 list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); 2990 } 2991 2992 static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) 2993 { 2994 lockdep_assert_held(&ftrace_lock); 2995 list_del_rcu(&ops->list); 2996 synchronize_rcu(); 2997 } 2998 2999 /* 3000 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols 3001 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is 3002 * not a module. 3003 */ 3004 #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace" 3005 #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline" 3006 3007 static void ftrace_trampoline_free(struct ftrace_ops *ops) 3008 { 3009 if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && 3010 ops->trampoline) { 3011 /* 3012 * Record the text poke event before the ksymbol unregister 3013 * event. 3014 */ 3015 perf_event_text_poke((void *)ops->trampoline, 3016 (void *)ops->trampoline, 3017 ops->trampoline_size, NULL, 0); 3018 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, 3019 ops->trampoline, ops->trampoline_size, 3020 true, FTRACE_TRAMPOLINE_SYM); 3021 /* Remove from kallsyms after the perf events */ 3022 ftrace_remove_trampoline_from_kallsyms(ops); 3023 } 3024 3025 arch_ftrace_trampoline_free(ops); 3026 } 3027 3028 static void ftrace_startup_enable(int command) 3029 { 3030 if (saved_ftrace_func != ftrace_trace_function) { 3031 saved_ftrace_func = ftrace_trace_function; 3032 command |= FTRACE_UPDATE_TRACE_FUNC; 3033 } 3034 3035 if (!command || !ftrace_enabled) 3036 return; 3037 3038 ftrace_run_update_code(command); 3039 } 3040 3041 static void ftrace_startup_all(int command) 3042 { 3043 update_all_ops = true; 3044 ftrace_startup_enable(command); 3045 update_all_ops = false; 3046 } 3047 3048 int ftrace_startup(struct ftrace_ops *ops, int command) 3049 { 3050 int ret; 3051 3052 if (unlikely(ftrace_disabled)) 3053 return -ENODEV; 3054 3055 ret = __register_ftrace_function(ops); 3056 if (ret) 3057 return ret; 3058 3059 ftrace_start_up++; 3060 3061 /* 3062 * Note that ftrace probes use this to start up 3063 * and modify the functions they will probe. But we still 3064 * set the ADDING flag for modification, as probes 3065 * do not have trampolines. If they add them in the 3066 * future, then the probes will need to distinguish 3067 * between adding and updating probes.
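 *
 * (For reference: register_ftrace_function() essentially boils down to
 *
 *	mutex_lock(&ftrace_lock);
 *	ret = ftrace_startup(ops, 0);
 *	mutex_unlock(&ftrace_lock);
 *
 * so everything below runs with ftrace_lock held.)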
3068 */ 3069 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; 3070 3071 ret = ftrace_hash_ipmodify_enable(ops); 3072 if (ret < 0) { 3073 /* Rollback registration process */ 3074 __unregister_ftrace_function(ops); 3075 ftrace_start_up--; 3076 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 3077 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) 3078 ftrace_trampoline_free(ops); 3079 return ret; 3080 } 3081 3082 if (ftrace_hash_rec_enable(ops)) 3083 command |= FTRACE_UPDATE_CALLS; 3084 3085 ftrace_startup_enable(command); 3086 3087 /* 3088 * If ftrace is in an undefined state, we just remove ops from list 3089 * to prevent the NULL pointer, instead of totally rolling it back and 3090 * free trampoline, because those actions could cause further damage. 3091 */ 3092 if (unlikely(ftrace_disabled)) { 3093 __unregister_ftrace_function(ops); 3094 return -ENODEV; 3095 } 3096 3097 ops->flags &= ~FTRACE_OPS_FL_ADDING; 3098 3099 return 0; 3100 } 3101 3102 int ftrace_shutdown(struct ftrace_ops *ops, int command) 3103 { 3104 int ret; 3105 3106 if (unlikely(ftrace_disabled)) 3107 return -ENODEV; 3108 3109 ret = __unregister_ftrace_function(ops); 3110 if (ret) 3111 return ret; 3112 3113 ftrace_start_up--; 3114 /* 3115 * Just warn in case of unbalance, no need to kill ftrace, it's not 3116 * critical but the ftrace_call callers may be never nopped again after 3117 * further ftrace uses. 3118 */ 3119 WARN_ON_ONCE(ftrace_start_up < 0); 3120 3121 /* Disabling ipmodify never fails */ 3122 ftrace_hash_ipmodify_disable(ops); 3123 3124 if (ftrace_hash_rec_disable(ops)) 3125 command |= FTRACE_UPDATE_CALLS; 3126 3127 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 3128 3129 if (saved_ftrace_func != ftrace_trace_function) { 3130 saved_ftrace_func = ftrace_trace_function; 3131 command |= FTRACE_UPDATE_TRACE_FUNC; 3132 } 3133 3134 if (!command || !ftrace_enabled) 3135 goto out; 3136 3137 /* 3138 * If the ops uses a trampoline, then it needs to be 3139 * tested first on update. 3140 */ 3141 ops->flags |= FTRACE_OPS_FL_REMOVING; 3142 removed_ops = ops; 3143 3144 /* The trampoline logic checks the old hashes */ 3145 ops->old_hash.filter_hash = ops->func_hash->filter_hash; 3146 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; 3147 3148 ftrace_run_update_code(command); 3149 3150 /* 3151 * If there's no more ops registered with ftrace, run a 3152 * sanity check to make sure all rec flags are cleared. 3153 */ 3154 if (rcu_dereference_protected(ftrace_ops_list, 3155 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { 3156 struct ftrace_page *pg; 3157 struct dyn_ftrace *rec; 3158 3159 do_for_each_ftrace_rec(pg, rec) { 3160 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS)) 3161 pr_warn(" %pS flags:%lx\n", 3162 (void *)rec->ip, rec->flags); 3163 } while_for_each_ftrace_rec(); 3164 } 3165 3166 ops->old_hash.filter_hash = NULL; 3167 ops->old_hash.notrace_hash = NULL; 3168 3169 removed_ops = NULL; 3170 ops->flags &= ~FTRACE_OPS_FL_REMOVING; 3171 3172 out: 3173 /* 3174 * Dynamic ops may be freed, we must make sure that all 3175 * callers are done before leaving this function. 3176 */ 3177 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { 3178 /* 3179 * We need to do a hard force of sched synchronization. 3180 * This is because we use preempt_disable() to do RCU, but 3181 * the function tracers can be called where RCU is not watching 3182 * (like before user_exit()). We can not rely on the RCU 3183 * infrastructure to do the synchronization, thus we must do it 3184 * ourselves. 
3185 */ 3186 synchronize_rcu_tasks_rude(); 3187 3188 /* 3189 * When the kernel is preemptive, tasks can be preempted 3190 * while on a ftrace trampoline. Just scheduling a task on 3191 * a CPU is not good enough to flush them. Calling 3192 * synchronize_rcu_tasks() will wait for those tasks to 3193 * execute and either schedule voluntarily or enter user space. 3194 */ 3195 synchronize_rcu_tasks(); 3196 3197 ftrace_trampoline_free(ops); 3198 } 3199 3200 return 0; 3201 } 3202 3203 /* Simply make a copy of @src and return it */ 3204 static struct ftrace_hash *copy_hash(struct ftrace_hash *src) 3205 { 3206 if (ftrace_hash_empty(src)) 3207 return EMPTY_HASH; 3208 3209 return alloc_and_copy_ftrace_hash(src->size_bits, src); 3210 } 3211 3212 /* 3213 * Append @new_hash entries to @hash: 3214 * 3215 * If @hash is the EMPTY_HASH then it traces all functions and nothing 3216 * needs to be done. 3217 * 3218 * If @new_hash is the EMPTY_HASH, then make *hash the EMPTY_HASH so 3219 * that it traces everything. 3220 * 3221 * Otherwise, go through all of @new_hash and add anything that @hash 3222 * doesn't already have, to @hash. 3223 * 3224 * The filter_hash updates uses just the append_hash() function 3225 * and the notrace_hash does not. 3226 */ 3227 static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash) 3228 { 3229 struct ftrace_func_entry *entry; 3230 int size; 3231 int i; 3232 3233 /* An empty hash does everything */ 3234 if (ftrace_hash_empty(*hash)) 3235 return 0; 3236 3237 /* If new_hash has everything make hash have everything */ 3238 if (ftrace_hash_empty(new_hash)) { 3239 free_ftrace_hash(*hash); 3240 *hash = EMPTY_HASH; 3241 return 0; 3242 } 3243 3244 size = 1 << new_hash->size_bits; 3245 for (i = 0; i < size; i++) { 3246 hlist_for_each_entry(entry, &new_hash->buckets[i], hlist) { 3247 /* Only add if not already in hash */ 3248 if (!__ftrace_lookup_ip(*hash, entry->ip) && 3249 add_hash_entry(*hash, entry->ip) == NULL) 3250 return -ENOMEM; 3251 } 3252 } 3253 return 0; 3254 } 3255 3256 /* 3257 * Add to @hash only those that are in both @new_hash1 and @new_hash2 3258 * 3259 * The notrace_hash updates uses just the intersect_hash() function 3260 * and the filter_hash does not. 3261 */ 3262 static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash1, 3263 struct ftrace_hash *new_hash2) 3264 { 3265 struct ftrace_func_entry *entry; 3266 int size; 3267 int i; 3268 3269 /* 3270 * If new_hash1 or new_hash2 is the EMPTY_HASH then make the hash 3271 * empty as well as empty for notrace means none are notraced. 
3272 */ 3273 if (ftrace_hash_empty(new_hash1) || ftrace_hash_empty(new_hash2)) { 3274 free_ftrace_hash(*hash); 3275 *hash = EMPTY_HASH; 3276 return 0; 3277 } 3278 3279 size = 1 << new_hash1->size_bits; 3280 for (i = 0; i < size; i++) { 3281 hlist_for_each_entry(entry, &new_hash1->buckets[i], hlist) { 3282 /* Only add if in both @new_hash1 and @new_hash2 */ 3283 if (__ftrace_lookup_ip(new_hash2, entry->ip) && 3284 add_hash_entry(*hash, entry->ip) == NULL) 3285 return -ENOMEM; 3286 } 3287 } 3288 /* If nothing intersects, make it the empty set */ 3289 if (ftrace_hash_empty(*hash)) { 3290 free_ftrace_hash(*hash); 3291 *hash = EMPTY_HASH; 3292 } 3293 return 0; 3294 } 3295 3296 /* Return a new hash that has a union of all @ops->filter_hash entries */ 3297 static struct ftrace_hash *append_hashes(struct ftrace_ops *ops) 3298 { 3299 struct ftrace_hash *new_hash; 3300 struct ftrace_ops *subops; 3301 int ret; 3302 3303 new_hash = alloc_ftrace_hash(ops->func_hash->filter_hash->size_bits); 3304 if (!new_hash) 3305 return NULL; 3306 3307 list_for_each_entry(subops, &ops->subop_list, list) { 3308 ret = append_hash(&new_hash, subops->func_hash->filter_hash); 3309 if (ret < 0) { 3310 free_ftrace_hash(new_hash); 3311 return NULL; 3312 } 3313 /* Nothing more to do if new_hash is empty */ 3314 if (ftrace_hash_empty(new_hash)) 3315 break; 3316 } 3317 return new_hash; 3318 } 3319 3320 /* Make @ops trace evenything except what all its subops do not trace */ 3321 static struct ftrace_hash *intersect_hashes(struct ftrace_ops *ops) 3322 { 3323 struct ftrace_hash *new_hash = NULL; 3324 struct ftrace_ops *subops; 3325 int size_bits; 3326 int ret; 3327 3328 list_for_each_entry(subops, &ops->subop_list, list) { 3329 struct ftrace_hash *next_hash; 3330 3331 if (!new_hash) { 3332 size_bits = subops->func_hash->notrace_hash->size_bits; 3333 new_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->notrace_hash); 3334 if (!new_hash) 3335 return NULL; 3336 continue; 3337 } 3338 size_bits = new_hash->size_bits; 3339 next_hash = new_hash; 3340 new_hash = alloc_ftrace_hash(size_bits); 3341 ret = intersect_hash(&new_hash, next_hash, subops->func_hash->notrace_hash); 3342 free_ftrace_hash(next_hash); 3343 if (ret < 0) { 3344 free_ftrace_hash(new_hash); 3345 return NULL; 3346 } 3347 /* Nothing more to do if new_hash is empty */ 3348 if (ftrace_hash_empty(new_hash)) 3349 break; 3350 } 3351 return new_hash; 3352 } 3353 3354 static bool ops_equal(struct ftrace_hash *A, struct ftrace_hash *B) 3355 { 3356 struct ftrace_func_entry *entry; 3357 int size; 3358 int i; 3359 3360 if (ftrace_hash_empty(A)) 3361 return ftrace_hash_empty(B); 3362 3363 if (ftrace_hash_empty(B)) 3364 return ftrace_hash_empty(A); 3365 3366 if (A->count != B->count) 3367 return false; 3368 3369 size = 1 << A->size_bits; 3370 for (i = 0; i < size; i++) { 3371 hlist_for_each_entry(entry, &A->buckets[i], hlist) { 3372 if (!__ftrace_lookup_ip(B, entry->ip)) 3373 return false; 3374 } 3375 } 3376 3377 return true; 3378 } 3379 3380 static void ftrace_ops_update_code(struct ftrace_ops *ops, 3381 struct ftrace_ops_hash *old_hash); 3382 3383 static int __ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, 3384 struct ftrace_hash **orig_hash, 3385 struct ftrace_hash *hash, 3386 int enable) 3387 { 3388 struct ftrace_ops_hash old_hash_ops; 3389 struct ftrace_hash *old_hash; 3390 int ret; 3391 3392 old_hash = *orig_hash; 3393 old_hash_ops.filter_hash = ops->func_hash->filter_hash; 3394 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 3395 ret = 
ftrace_hash_move(ops, enable, orig_hash, hash); 3396 if (!ret) { 3397 ftrace_ops_update_code(ops, &old_hash_ops); 3398 free_ftrace_hash_rcu(old_hash); 3399 } 3400 return ret; 3401 } 3402 3403 static int ftrace_update_ops(struct ftrace_ops *ops, struct ftrace_hash *filter_hash, 3404 struct ftrace_hash *notrace_hash) 3405 { 3406 int ret; 3407 3408 if (!ops_equal(filter_hash, ops->func_hash->filter_hash)) { 3409 ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->filter_hash, 3410 filter_hash, 1); 3411 if (ret < 0) 3412 return ret; 3413 } 3414 3415 if (!ops_equal(notrace_hash, ops->func_hash->notrace_hash)) { 3416 ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->notrace_hash, 3417 notrace_hash, 0); 3418 if (ret < 0) 3419 return ret; 3420 } 3421 3422 return 0; 3423 } 3424 3425 /** 3426 * ftrace_startup_subops - enable tracing for subops of an ops 3427 * @ops: Manager ops (used to pick all the functions of its subops) 3428 * @subops: A new ops to add to @ops 3429 * @command: Extra commands to use to enable tracing 3430 * 3431 * The @ops is a manager @ops that has the filter that includes all the functions 3432 * that its list of subops are tracing. Adding a new @subops will add the 3433 * functions of @subops to @ops. 3434 */ 3435 int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command) 3436 { 3437 struct ftrace_hash *filter_hash; 3438 struct ftrace_hash *notrace_hash; 3439 struct ftrace_hash *save_filter_hash; 3440 struct ftrace_hash *save_notrace_hash; 3441 int size_bits; 3442 int ret; 3443 3444 if (unlikely(ftrace_disabled)) 3445 return -ENODEV; 3446 3447 ftrace_ops_init(ops); 3448 ftrace_ops_init(subops); 3449 3450 if (WARN_ON_ONCE(subops->flags & FTRACE_OPS_FL_ENABLED)) 3451 return -EBUSY; 3452 3453 /* Make everything canonical (Just in case!) */ 3454 if (!ops->func_hash->filter_hash) 3455 ops->func_hash->filter_hash = EMPTY_HASH; 3456 if (!ops->func_hash->notrace_hash) 3457 ops->func_hash->notrace_hash = EMPTY_HASH; 3458 if (!subops->func_hash->filter_hash) 3459 subops->func_hash->filter_hash = EMPTY_HASH; 3460 if (!subops->func_hash->notrace_hash) 3461 subops->func_hash->notrace_hash = EMPTY_HASH; 3462 3463 /* For the first subops to ops just enable it normally */ 3464 if (list_empty(&ops->subop_list)) { 3465 /* Just use the subops hashes */ 3466 filter_hash = copy_hash(subops->func_hash->filter_hash); 3467 notrace_hash = copy_hash(subops->func_hash->notrace_hash); 3468 if (!filter_hash || !notrace_hash) { 3469 free_ftrace_hash(filter_hash); 3470 free_ftrace_hash(notrace_hash); 3471 return -ENOMEM; 3472 } 3473 3474 save_filter_hash = ops->func_hash->filter_hash; 3475 save_notrace_hash = ops->func_hash->notrace_hash; 3476 3477 ops->func_hash->filter_hash = filter_hash; 3478 ops->func_hash->notrace_hash = notrace_hash; 3479 list_add(&subops->list, &ops->subop_list); 3480 ret = ftrace_startup(ops, command); 3481 if (ret < 0) { 3482 list_del(&subops->list); 3483 ops->func_hash->filter_hash = save_filter_hash; 3484 ops->func_hash->notrace_hash = save_notrace_hash; 3485 free_ftrace_hash(filter_hash); 3486 free_ftrace_hash(notrace_hash); 3487 } else { 3488 free_ftrace_hash(save_filter_hash); 3489 free_ftrace_hash(save_notrace_hash); 3490 subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP; 3491 subops->managed = ops; 3492 } 3493 return ret; 3494 } 3495 3496 /* 3497 * Here there's already something attached. 
Here are the rules: 3498 * o If either filter_hash is empty then the final stays empty 3499 * o Otherwise, the final is a superset of both hashes 3500 * o If either notrace_hash is empty then the final stays empty 3501 * o Otherwise, the final is an intersection between the hashes 3502 */ 3503 if (ftrace_hash_empty(ops->func_hash->filter_hash) || 3504 ftrace_hash_empty(subops->func_hash->filter_hash)) { 3505 filter_hash = EMPTY_HASH; 3506 } else { 3507 size_bits = max(ops->func_hash->filter_hash->size_bits, 3508 subops->func_hash->filter_hash->size_bits); 3509 filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash); 3510 if (!filter_hash) 3511 return -ENOMEM; 3512 ret = append_hash(&filter_hash, subops->func_hash->filter_hash); 3513 if (ret < 0) { 3514 free_ftrace_hash(filter_hash); 3515 return ret; 3516 } 3517 } 3518 3519 if (ftrace_hash_empty(ops->func_hash->notrace_hash) || 3520 ftrace_hash_empty(subops->func_hash->notrace_hash)) { 3521 notrace_hash = EMPTY_HASH; 3522 } else { 3523 size_bits = max(ops->func_hash->notrace_hash->size_bits, 3524 subops->func_hash->notrace_hash->size_bits); 3525 notrace_hash = alloc_ftrace_hash(size_bits); 3526 if (!notrace_hash) { 3527 free_ftrace_hash(filter_hash); 3528 return -ENOMEM; 3529 } 3530 3531 ret = intersect_hash(&notrace_hash, ops->func_hash->notrace_hash, 3532 subops->func_hash->notrace_hash); 3533 if (ret < 0) { 3534 free_ftrace_hash(filter_hash); 3535 free_ftrace_hash(notrace_hash); 3536 return ret; 3537 } 3538 } 3539 3540 list_add(&subops->list, &ops->subop_list); 3541 3542 ret = ftrace_update_ops(ops, filter_hash, notrace_hash); 3543 free_ftrace_hash(filter_hash); 3544 free_ftrace_hash(notrace_hash); 3545 if (ret < 0) { 3546 list_del(&subops->list); 3547 } else { 3548 subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP; 3549 subops->managed = ops; 3550 } 3551 return ret; 3552 } 3553 3554 /** 3555 * ftrace_shutdown_subops - Remove a subops from a manager ops 3556 * @ops: A manager ops to remove @subops from 3557 * @subops: The subops to remove from @ops 3558 * @command: Any extra command flags to add to modifying the text 3559 * 3560 * Removes the functions being traced by the @subops from @ops. Note, it 3561 * will not affect functions that are being traced by other subops that 3562 * still exist in @ops. 3563 * 3564 * If the last subops is removed from @ops, then @ops is shut down normally.
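 *
 * (The manager/subops scheme is what lets, for example, multiple
 * function_graph users share one set of patched call sites: each user's
 * ops is registered as a subops of a single manager ops, and only the
 * manager's combined hashes decide which records are touched. This note
 * is illustrative; see ftrace_startup_subops() above for the mechanics.)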
3565 */ 3566 int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command) 3567 { 3568 struct ftrace_hash *filter_hash; 3569 struct ftrace_hash *notrace_hash; 3570 int ret; 3571 3572 if (unlikely(ftrace_disabled)) 3573 return -ENODEV; 3574 3575 if (WARN_ON_ONCE(!(subops->flags & FTRACE_OPS_FL_ENABLED))) 3576 return -EINVAL; 3577 3578 list_del(&subops->list); 3579 3580 if (list_empty(&ops->subop_list)) { 3581 /* Last one, just disable the current ops */ 3582 3583 ret = ftrace_shutdown(ops, command); 3584 if (ret < 0) { 3585 list_add(&subops->list, &ops->subop_list); 3586 return ret; 3587 } 3588 3589 subops->flags &= ~FTRACE_OPS_FL_ENABLED; 3590 3591 free_ftrace_hash(ops->func_hash->filter_hash); 3592 free_ftrace_hash(ops->func_hash->notrace_hash); 3593 ops->func_hash->filter_hash = EMPTY_HASH; 3594 ops->func_hash->notrace_hash = EMPTY_HASH; 3595 subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP); 3596 subops->managed = NULL; 3597 3598 return 0; 3599 } 3600 3601 /* Rebuild the hashes without subops */ 3602 filter_hash = append_hashes(ops); 3603 notrace_hash = intersect_hashes(ops); 3604 if (!filter_hash || !notrace_hash) { 3605 free_ftrace_hash(filter_hash); 3606 free_ftrace_hash(notrace_hash); 3607 list_add(&subops->list, &ops->subop_list); 3608 return -ENOMEM; 3609 } 3610 3611 ret = ftrace_update_ops(ops, filter_hash, notrace_hash); 3612 if (ret < 0) { 3613 list_add(&subops->list, &ops->subop_list); 3614 } else { 3615 subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP); 3616 subops->managed = NULL; 3617 } 3618 free_ftrace_hash(filter_hash); 3619 free_ftrace_hash(notrace_hash); 3620 return ret; 3621 } 3622 3623 static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops, 3624 struct ftrace_hash **orig_subhash, 3625 struct ftrace_hash *hash, 3626 int enable) 3627 { 3628 struct ftrace_ops *ops = subops->managed; 3629 struct ftrace_hash **orig_hash; 3630 struct ftrace_hash *save_hash; 3631 struct ftrace_hash *new_hash; 3632 int ret; 3633 3634 /* Manager ops can not be subops (yet) */ 3635 if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP)) 3636 return -EINVAL; 3637 3638 /* Move the new hash over to the subops hash */ 3639 save_hash = *orig_subhash; 3640 *orig_subhash = __ftrace_hash_move(hash); 3641 if (!*orig_subhash) { 3642 *orig_subhash = save_hash; 3643 return -ENOMEM; 3644 } 3645 3646 /* Create a new_hash to hold the ops new functions */ 3647 if (enable) { 3648 orig_hash = &ops->func_hash->filter_hash; 3649 new_hash = append_hashes(ops); 3650 } else { 3651 orig_hash = &ops->func_hash->notrace_hash; 3652 new_hash = intersect_hashes(ops); 3653 } 3654 3655 /* Move the hash over to the new hash */ 3656 ret = __ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable); 3657 3658 free_ftrace_hash(new_hash); 3659 3660 if (ret) { 3661 /* Put back the original hash */ 3662 free_ftrace_hash_rcu(*orig_subhash); 3663 *orig_subhash = save_hash; 3664 } else { 3665 free_ftrace_hash_rcu(save_hash); 3666 } 3667 return ret; 3668 } 3669 3670 3671 u64 ftrace_update_time; 3672 u64 ftrace_total_mod_time; 3673 unsigned long ftrace_update_tot_cnt; 3674 unsigned long ftrace_number_of_pages; 3675 unsigned long ftrace_number_of_groups; 3676 3677 static inline int ops_traces_mod(struct ftrace_ops *ops) 3678 { 3679 /* 3680 * Filter_hash being empty will default to trace module. 3681 * But notrace hash requires a test of individual module functions. 
3682 */ 3683 return ftrace_hash_empty(ops->func_hash->filter_hash) && 3684 ftrace_hash_empty(ops->func_hash->notrace_hash); 3685 } 3686 3687 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) 3688 { 3689 bool init_nop = ftrace_need_init_nop(); 3690 struct ftrace_page *pg; 3691 struct dyn_ftrace *p; 3692 u64 start, stop, update_time; 3693 unsigned long update_cnt = 0; 3694 unsigned long rec_flags = 0; 3695 int i; 3696 3697 start = ftrace_now(raw_smp_processor_id()); 3698 3699 /* 3700 * When a module is loaded, this function is called to convert 3701 * the calls to mcount in its text to nops, and also to create 3702 * an entry in the ftrace data. Now, if ftrace is activated 3703 * after this call, but before the module sets its text to 3704 * read-only, the modification of enabling ftrace can fail if 3705 * the read-only is done while ftrace is converting the calls. 3706 * To prevent this, the module's records are set as disabled 3707 * and will be enabled after the call to set the module's text 3708 * to read-only. 3709 */ 3710 if (mod) 3711 rec_flags |= FTRACE_FL_DISABLED; 3712 3713 for (pg = new_pgs; pg; pg = pg->next) { 3714 3715 for (i = 0; i < pg->index; i++) { 3716 3717 /* If something went wrong, bail without enabling anything */ 3718 if (unlikely(ftrace_disabled)) 3719 return -1; 3720 3721 p = &pg->records[i]; 3722 p->flags = rec_flags; 3723 3724 /* 3725 * Do the initial record conversion from mcount jump 3726 * to the NOP instructions. 3727 */ 3728 if (init_nop && !ftrace_nop_initialize(mod, p)) 3729 break; 3730 3731 update_cnt++; 3732 } 3733 } 3734 3735 stop = ftrace_now(raw_smp_processor_id()); 3736 update_time = stop - start; 3737 if (mod) 3738 ftrace_total_mod_time += update_time; 3739 else 3740 ftrace_update_time = update_time; 3741 ftrace_update_tot_cnt += update_cnt; 3742 3743 return 0; 3744 } 3745 3746 static int ftrace_allocate_records(struct ftrace_page *pg, int count) 3747 { 3748 int order; 3749 int pages; 3750 int cnt; 3751 3752 if (WARN_ON(!count)) 3753 return -EINVAL; 3754 3755 /* We want to fill as much as possible, with no empty pages */ 3756 pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE); 3757 order = fls(pages) - 1; 3758 3759 again: 3760 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 3761 3762 if (!pg->records) { 3763 /* if we can't allocate this size, try something smaller */ 3764 if (!order) 3765 return -ENOMEM; 3766 order--; 3767 goto again; 3768 } 3769 3770 ftrace_number_of_pages += 1 << order; 3771 ftrace_number_of_groups++; 3772 3773 cnt = (PAGE_SIZE << order) / ENTRY_SIZE; 3774 pg->order = order; 3775 3776 if (cnt > count) 3777 cnt = count; 3778 3779 return cnt; 3780 } 3781 3782 static void ftrace_free_pages(struct ftrace_page *pages) 3783 { 3784 struct ftrace_page *pg = pages; 3785 3786 while (pg) { 3787 if (pg->records) { 3788 free_pages((unsigned long)pg->records, pg->order); 3789 ftrace_number_of_pages -= 1 << pg->order; 3790 } 3791 pages = pg->next; 3792 kfree(pg); 3793 pg = pages; 3794 ftrace_number_of_groups--; 3795 } 3796 } 3797 3798 static struct ftrace_page * 3799 ftrace_allocate_pages(unsigned long num_to_init) 3800 { 3801 struct ftrace_page *start_pg; 3802 struct ftrace_page *pg; 3803 int cnt; 3804 3805 if (!num_to_init) 3806 return NULL; 3807 3808 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); 3809 if (!pg) 3810 return NULL; 3811 3812 /* 3813 * Try to allocate as much as possible in one continues 3814 * location that fills in all of the space. 
We want to 3815 * waste as little space as possible. 3816 */ 3817 for (;;) { 3818 cnt = ftrace_allocate_records(pg, num_to_init); 3819 if (cnt < 0) 3820 goto free_pages; 3821 3822 num_to_init -= cnt; 3823 if (!num_to_init) 3824 break; 3825 3826 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); 3827 if (!pg->next) 3828 goto free_pages; 3829 3830 pg = pg->next; 3831 } 3832 3833 return start_pg; 3834 3835 free_pages: 3836 ftrace_free_pages(start_pg); 3837 pr_info("ftrace: FAILED to allocate memory for functions\n"); 3838 return NULL; 3839 } 3840 3841 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 3842 3843 struct ftrace_iterator { 3844 loff_t pos; 3845 loff_t func_pos; 3846 loff_t mod_pos; 3847 struct ftrace_page *pg; 3848 struct dyn_ftrace *func; 3849 struct ftrace_func_probe *probe; 3850 struct ftrace_func_entry *probe_entry; 3851 struct trace_parser parser; 3852 struct ftrace_hash *hash; 3853 struct ftrace_ops *ops; 3854 struct trace_array *tr; 3855 struct list_head *mod_list; 3856 int pidx; 3857 int idx; 3858 unsigned flags; 3859 }; 3860 3861 static void * 3862 t_probe_next(struct seq_file *m, loff_t *pos) 3863 { 3864 struct ftrace_iterator *iter = m->private; 3865 struct trace_array *tr = iter->ops->private; 3866 struct list_head *func_probes; 3867 struct ftrace_hash *hash; 3868 struct list_head *next; 3869 struct hlist_node *hnd = NULL; 3870 struct hlist_head *hhd; 3871 int size; 3872 3873 (*pos)++; 3874 iter->pos = *pos; 3875 3876 if (!tr) 3877 return NULL; 3878 3879 func_probes = &tr->func_probes; 3880 if (list_empty(func_probes)) 3881 return NULL; 3882 3883 if (!iter->probe) { 3884 next = func_probes->next; 3885 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3886 } 3887 3888 if (iter->probe_entry) 3889 hnd = &iter->probe_entry->hlist; 3890 3891 hash = iter->probe->ops.func_hash->filter_hash; 3892 3893 /* 3894 * A probe being registered may temporarily have an empty hash 3895 * and it's at the end of the func_probes list. 
3896 */ 3897 if (!hash || hash == EMPTY_HASH) 3898 return NULL; 3899 3900 size = 1 << hash->size_bits; 3901 3902 retry: 3903 if (iter->pidx >= size) { 3904 if (iter->probe->list.next == func_probes) 3905 return NULL; 3906 next = iter->probe->list.next; 3907 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3908 hash = iter->probe->ops.func_hash->filter_hash; 3909 size = 1 << hash->size_bits; 3910 iter->pidx = 0; 3911 } 3912 3913 hhd = &hash->buckets[iter->pidx]; 3914 3915 if (hlist_empty(hhd)) { 3916 iter->pidx++; 3917 hnd = NULL; 3918 goto retry; 3919 } 3920 3921 if (!hnd) 3922 hnd = hhd->first; 3923 else { 3924 hnd = hnd->next; 3925 if (!hnd) { 3926 iter->pidx++; 3927 goto retry; 3928 } 3929 } 3930 3931 if (WARN_ON_ONCE(!hnd)) 3932 return NULL; 3933 3934 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); 3935 3936 return iter; 3937 } 3938 3939 static void *t_probe_start(struct seq_file *m, loff_t *pos) 3940 { 3941 struct ftrace_iterator *iter = m->private; 3942 void *p = NULL; 3943 loff_t l; 3944 3945 if (!(iter->flags & FTRACE_ITER_DO_PROBES)) 3946 return NULL; 3947 3948 if (iter->mod_pos > *pos) 3949 return NULL; 3950 3951 iter->probe = NULL; 3952 iter->probe_entry = NULL; 3953 iter->pidx = 0; 3954 for (l = 0; l <= (*pos - iter->mod_pos); ) { 3955 p = t_probe_next(m, &l); 3956 if (!p) 3957 break; 3958 } 3959 if (!p) 3960 return NULL; 3961 3962 /* Only set this if we have an item */ 3963 iter->flags |= FTRACE_ITER_PROBE; 3964 3965 return iter; 3966 } 3967 3968 static int 3969 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) 3970 { 3971 struct ftrace_func_entry *probe_entry; 3972 struct ftrace_probe_ops *probe_ops; 3973 struct ftrace_func_probe *probe; 3974 3975 probe = iter->probe; 3976 probe_entry = iter->probe_entry; 3977 3978 if (WARN_ON_ONCE(!probe || !probe_entry)) 3979 return -EIO; 3980 3981 probe_ops = probe->probe_ops; 3982 3983 if (probe_ops->print) 3984 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); 3985 3986 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, 3987 (void *)probe_ops->func); 3988 3989 return 0; 3990 } 3991 3992 static void * 3993 t_mod_next(struct seq_file *m, loff_t *pos) 3994 { 3995 struct ftrace_iterator *iter = m->private; 3996 struct trace_array *tr = iter->tr; 3997 3998 (*pos)++; 3999 iter->pos = *pos; 4000 4001 iter->mod_list = iter->mod_list->next; 4002 4003 if (iter->mod_list == &tr->mod_trace || 4004 iter->mod_list == &tr->mod_notrace) { 4005 iter->flags &= ~FTRACE_ITER_MOD; 4006 return NULL; 4007 } 4008 4009 iter->mod_pos = *pos; 4010 4011 return iter; 4012 } 4013 4014 static void *t_mod_start(struct seq_file *m, loff_t *pos) 4015 { 4016 struct ftrace_iterator *iter = m->private; 4017 void *p = NULL; 4018 loff_t l; 4019 4020 if (iter->func_pos > *pos) 4021 return NULL; 4022 4023 iter->mod_pos = iter->func_pos; 4024 4025 /* probes are only available if tr is set */ 4026 if (!iter->tr) 4027 return NULL; 4028 4029 for (l = 0; l <= (*pos - iter->func_pos); ) { 4030 p = t_mod_next(m, &l); 4031 if (!p) 4032 break; 4033 } 4034 if (!p) { 4035 iter->flags &= ~FTRACE_ITER_MOD; 4036 return t_probe_start(m, pos); 4037 } 4038 4039 /* Only set this if we have an item */ 4040 iter->flags |= FTRACE_ITER_MOD; 4041 4042 return iter; 4043 } 4044 4045 static int 4046 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter) 4047 { 4048 struct ftrace_mod_load *ftrace_mod; 4049 struct trace_array *tr = iter->tr; 4050 4051 if (WARN_ON_ONCE(!iter->mod_list) || 4052 iter->mod_list == &tr->mod_trace || 
4053 iter->mod_list == &tr->mod_notrace) 4054 return -EIO; 4055 4056 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); 4057 4058 if (ftrace_mod->func) 4059 seq_printf(m, "%s", ftrace_mod->func); 4060 else 4061 seq_putc(m, '*'); 4062 4063 seq_printf(m, ":mod:%s\n", ftrace_mod->module); 4064 4065 return 0; 4066 } 4067 4068 static void * 4069 t_func_next(struct seq_file *m, loff_t *pos) 4070 { 4071 struct ftrace_iterator *iter = m->private; 4072 struct dyn_ftrace *rec = NULL; 4073 4074 (*pos)++; 4075 4076 retry: 4077 if (iter->idx >= iter->pg->index) { 4078 if (iter->pg->next) { 4079 iter->pg = iter->pg->next; 4080 iter->idx = 0; 4081 goto retry; 4082 } 4083 } else { 4084 rec = &iter->pg->records[iter->idx++]; 4085 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && 4086 !ftrace_lookup_ip(iter->hash, rec->ip)) || 4087 4088 ((iter->flags & FTRACE_ITER_ENABLED) && 4089 !(rec->flags & FTRACE_FL_ENABLED)) || 4090 4091 ((iter->flags & FTRACE_ITER_TOUCHED) && 4092 !(rec->flags & FTRACE_FL_TOUCHED))) { 4093 4094 rec = NULL; 4095 goto retry; 4096 } 4097 } 4098 4099 if (!rec) 4100 return NULL; 4101 4102 iter->pos = iter->func_pos = *pos; 4103 iter->func = rec; 4104 4105 return iter; 4106 } 4107 4108 static void * 4109 t_next(struct seq_file *m, void *v, loff_t *pos) 4110 { 4111 struct ftrace_iterator *iter = m->private; 4112 loff_t l = *pos; /* t_probe_start() must use original pos */ 4113 void *ret; 4114 4115 if (unlikely(ftrace_disabled)) 4116 return NULL; 4117 4118 if (iter->flags & FTRACE_ITER_PROBE) 4119 return t_probe_next(m, pos); 4120 4121 if (iter->flags & FTRACE_ITER_MOD) 4122 return t_mod_next(m, pos); 4123 4124 if (iter->flags & FTRACE_ITER_PRINTALL) { 4125 /* next must increment pos, and t_probe_start does not */ 4126 (*pos)++; 4127 return t_mod_start(m, &l); 4128 } 4129 4130 ret = t_func_next(m, pos); 4131 4132 if (!ret) 4133 return t_mod_start(m, &l); 4134 4135 return ret; 4136 } 4137 4138 static void reset_iter_read(struct ftrace_iterator *iter) 4139 { 4140 iter->pos = 0; 4141 iter->func_pos = 0; 4142 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); 4143 } 4144 4145 static void *t_start(struct seq_file *m, loff_t *pos) 4146 { 4147 struct ftrace_iterator *iter = m->private; 4148 void *p = NULL; 4149 loff_t l; 4150 4151 mutex_lock(&ftrace_lock); 4152 4153 if (unlikely(ftrace_disabled)) 4154 return NULL; 4155 4156 /* 4157 * If an lseek was done, then reset and start from the beginning. 4158 */ 4159 if (*pos < iter->pos) 4160 reset_iter_read(iter); 4161 4162 /* 4163 * For set_ftrace_filter reading, if we have the filter 4164 * off, we can shortcut and just print out that all 4165 * functions are enabled. 4166 */ 4167 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && 4168 ftrace_hash_empty(iter->hash)) { 4169 iter->func_pos = 1; /* Account for the message */ 4170 if (*pos > 0) 4171 return t_mod_start(m, pos); 4172 iter->flags |= FTRACE_ITER_PRINTALL; 4173 /* reset in case of seek/pread */ 4174 iter->flags &= ~FTRACE_ITER_PROBE; 4175 return iter; 4176 } 4177 4178 if (iter->flags & FTRACE_ITER_MOD) 4179 return t_mod_start(m, pos); 4180 4181 /* 4182 * Unfortunately, we need to restart at ftrace_pages_start 4183 * every time we let go of the ftrace_lock. This is because 4184 * those pointers can change without the lock.
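 *
 * (Note: the ftrace_lock taken in t_start() is always dropped in
 * t_stop(); the seq_file core guarantees ->stop() runs after every
 * ->start(), even when ->start() returns NULL.)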
4185 */ 4186 iter->pg = ftrace_pages_start; 4187 iter->idx = 0; 4188 for (l = 0; l <= *pos; ) { 4189 p = t_func_next(m, &l); 4190 if (!p) 4191 break; 4192 } 4193 4194 if (!p) 4195 return t_mod_start(m, pos); 4196 4197 return iter; 4198 } 4199 4200 static void t_stop(struct seq_file *m, void *p) 4201 { 4202 mutex_unlock(&ftrace_lock); 4203 } 4204 4205 void * __weak 4206 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) 4207 { 4208 return NULL; 4209 } 4210 4211 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, 4212 struct dyn_ftrace *rec) 4213 { 4214 void *ptr; 4215 4216 ptr = arch_ftrace_trampoline_func(ops, rec); 4217 if (ptr) 4218 seq_printf(m, " ->%pS", ptr); 4219 } 4220 4221 #ifdef FTRACE_MCOUNT_MAX_OFFSET 4222 /* 4223 * Weak functions can still have an mcount/fentry that is saved in 4224 * the __mcount_loc section. These can be detected by having a 4225 * symbol offset of greater than FTRACE_MCOUNT_MAX_OFFSET, as the 4226 * symbol found by kallsyms is not the function that the mcount/fentry 4227 * is part of. The offset is much greater in these cases. 4228 * 4229 * Test the record to make sure that the ip points to a valid kallsyms 4230 * and if not, mark it disabled. 4231 */ 4232 static int test_for_valid_rec(struct dyn_ftrace *rec) 4233 { 4234 char str[KSYM_SYMBOL_LEN]; 4235 unsigned long offset; 4236 const char *ret; 4237 4238 ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str); 4239 4240 /* Weak functions can cause invalid addresses */ 4241 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { 4242 rec->flags |= FTRACE_FL_DISABLED; 4243 return 0; 4244 } 4245 return 1; 4246 } 4247 4248 static struct workqueue_struct *ftrace_check_wq __initdata; 4249 static struct work_struct ftrace_check_work __initdata; 4250 4251 /* 4252 * Scan all the mcount/fentry entries to make sure they are valid. 4253 */ 4254 static __init void ftrace_check_work_func(struct work_struct *work) 4255 { 4256 struct ftrace_page *pg; 4257 struct dyn_ftrace *rec; 4258 4259 mutex_lock(&ftrace_lock); 4260 do_for_each_ftrace_rec(pg, rec) { 4261 test_for_valid_rec(rec); 4262 } while_for_each_ftrace_rec(); 4263 mutex_unlock(&ftrace_lock); 4264 } 4265 4266 static int __init ftrace_check_for_weak_functions(void) 4267 { 4268 INIT_WORK(&ftrace_check_work, ftrace_check_work_func); 4269 4270 ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0); 4271 4272 queue_work(ftrace_check_wq, &ftrace_check_work); 4273 return 0; 4274 } 4275 4276 static int __init ftrace_check_sync(void) 4277 { 4278 /* Make sure the ftrace_check updates are finished */ 4279 if (ftrace_check_wq) 4280 destroy_workqueue(ftrace_check_wq); 4281 return 0; 4282 } 4283 4284 late_initcall_sync(ftrace_check_sync); 4285 subsys_initcall(ftrace_check_for_weak_functions); 4286 4287 static int print_rec(struct seq_file *m, unsigned long ip) 4288 { 4289 unsigned long offset; 4290 char str[KSYM_SYMBOL_LEN]; 4291 char *modname; 4292 const char *ret; 4293 4294 ret = kallsyms_lookup(ip, NULL, &offset, &modname, str); 4295 /* Weak functions can cause invalid addresses */ 4296 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { 4297 snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld", 4298 FTRACE_INVALID_FUNCTION, offset); 4299 ret = NULL; 4300 } 4301 4302 seq_puts(m, str); 4303 if (modname) 4304 seq_printf(m, " [%s]", modname); 4305 return ret == NULL ? 
-1 : 0; 4306 } 4307 #else 4308 static inline int test_for_valid_rec(struct dyn_ftrace *rec) 4309 { 4310 return 1; 4311 } 4312 4313 static inline int print_rec(struct seq_file *m, unsigned long ip) 4314 { 4315 seq_printf(m, "%ps", (void *)ip); 4316 return 0; 4317 } 4318 #endif 4319 4320 static int t_show(struct seq_file *m, void *v) 4321 { 4322 struct ftrace_iterator *iter = m->private; 4323 struct dyn_ftrace *rec; 4324 4325 if (iter->flags & FTRACE_ITER_PROBE) 4326 return t_probe_show(m, iter); 4327 4328 if (iter->flags & FTRACE_ITER_MOD) 4329 return t_mod_show(m, iter); 4330 4331 if (iter->flags & FTRACE_ITER_PRINTALL) { 4332 if (iter->flags & FTRACE_ITER_NOTRACE) 4333 seq_puts(m, "#### no functions disabled ####\n"); 4334 else 4335 seq_puts(m, "#### all functions enabled ####\n"); 4336 return 0; 4337 } 4338 4339 rec = iter->func; 4340 4341 if (!rec) 4342 return 0; 4343 4344 if (iter->flags & FTRACE_ITER_ADDRS) 4345 seq_printf(m, "%lx ", rec->ip); 4346 4347 if (print_rec(m, rec->ip)) { 4348 /* This should only happen when a rec is disabled */ 4349 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED)); 4350 seq_putc(m, '\n'); 4351 return 0; 4352 } 4353 4354 if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) { 4355 struct ftrace_ops *ops; 4356 4357 seq_printf(m, " (%ld)%s%s%s%s%s", 4358 ftrace_rec_count(rec), 4359 rec->flags & FTRACE_FL_REGS ? " R" : " ", 4360 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ", 4361 rec->flags & FTRACE_FL_DIRECT ? " D" : " ", 4362 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ", 4363 rec->flags & FTRACE_FL_MODIFIED ? " M " : " "); 4364 if (rec->flags & FTRACE_FL_TRAMP_EN) { 4365 ops = ftrace_find_tramp_ops_any(rec); 4366 if (ops) { 4367 do { 4368 seq_printf(m, "\ttramp: %pS (%pS)", 4369 (void *)ops->trampoline, 4370 (void *)ops->func); 4371 add_trampoline_func(m, ops, rec); 4372 ops = ftrace_find_tramp_ops_next(rec, ops); 4373 } while (ops); 4374 } else 4375 seq_puts(m, "\ttramp: ERROR!"); 4376 } else { 4377 add_trampoline_func(m, NULL, rec); 4378 } 4379 if (rec->flags & FTRACE_FL_CALL_OPS_EN) { 4380 ops = ftrace_find_unique_ops(rec); 4381 if (ops) { 4382 seq_printf(m, "\tops: %pS (%pS)", 4383 ops, ops->func); 4384 } else { 4385 seq_puts(m, "\tops: ERROR!"); 4386 } 4387 } 4388 if (rec->flags & FTRACE_FL_DIRECT) { 4389 unsigned long direct; 4390 4391 direct = ftrace_find_rec_direct(rec->ip); 4392 if (direct) 4393 seq_printf(m, "\n\tdirect-->%pS", (void *)direct); 4394 } 4395 } 4396 4397 seq_putc(m, '\n'); 4398 4399 return 0; 4400 } 4401 4402 static const struct seq_operations show_ftrace_seq_ops = { 4403 .start = t_start, 4404 .next = t_next, 4405 .stop = t_stop, 4406 .show = t_show, 4407 }; 4408 4409 static int 4410 ftrace_avail_open(struct inode *inode, struct file *file) 4411 { 4412 struct ftrace_iterator *iter; 4413 int ret; 4414 4415 ret = security_locked_down(LOCKDOWN_TRACEFS); 4416 if (ret) 4417 return ret; 4418 4419 if (unlikely(ftrace_disabled)) 4420 return -ENODEV; 4421 4422 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 4423 if (!iter) 4424 return -ENOMEM; 4425 4426 iter->pg = ftrace_pages_start; 4427 iter->ops = &global_ops; 4428 4429 return 0; 4430 } 4431 4432 static int 4433 ftrace_enabled_open(struct inode *inode, struct file *file) 4434 { 4435 struct ftrace_iterator *iter; 4436 4437 /* 4438 * This shows us what functions are currently being 4439 * traced and by what. Not sure if we want lockdown 4440 * to hide such critical information for an admin. 
4441 * Although, perhaps it can show information we don't 4442 * want people to see, but if something is tracing 4443 * something, we probably want to know about it. 4444 */ 4445 4446 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 4447 if (!iter) 4448 return -ENOMEM; 4449 4450 iter->pg = ftrace_pages_start; 4451 iter->flags = FTRACE_ITER_ENABLED; 4452 iter->ops = &global_ops; 4453 4454 return 0; 4455 } 4456 4457 static int 4458 ftrace_touched_open(struct inode *inode, struct file *file) 4459 { 4460 struct ftrace_iterator *iter; 4461 4462 /* 4463 * This shows us what functions have ever been enabled 4464 * (traced, direct, patched, etc). Not sure if we want lockdown 4465 * to hide such critical information for an admin. 4466 * Although, perhaps it can show information we don't 4467 * want people to see, but if something had traced 4468 * something, we probably want to know about it. 4469 */ 4470 4471 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 4472 if (!iter) 4473 return -ENOMEM; 4474 4475 iter->pg = ftrace_pages_start; 4476 iter->flags = FTRACE_ITER_TOUCHED; 4477 iter->ops = &global_ops; 4478 4479 return 0; 4480 } 4481 4482 static int 4483 ftrace_avail_addrs_open(struct inode *inode, struct file *file) 4484 { 4485 struct ftrace_iterator *iter; 4486 int ret; 4487 4488 ret = security_locked_down(LOCKDOWN_TRACEFS); 4489 if (ret) 4490 return ret; 4491 4492 if (unlikely(ftrace_disabled)) 4493 return -ENODEV; 4494 4495 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 4496 if (!iter) 4497 return -ENOMEM; 4498 4499 iter->pg = ftrace_pages_start; 4500 iter->flags = FTRACE_ITER_ADDRS; 4501 iter->ops = &global_ops; 4502 4503 return 0; 4504 } 4505 4506 /** 4507 * ftrace_regex_open - initialize function tracer filter files 4508 * @ops: The ftrace_ops that hold the hash filters 4509 * @flag: The type of filter to process 4510 * @inode: The inode, usually passed in to your open routine 4511 * @file: The file, usually passed in to your open routine 4512 * 4513 * ftrace_regex_open() initializes the filter files for the 4514 * @ops. Depending on @flag it may process the filter hash or 4515 * the notrace hash of @ops. With this called from the open 4516 * routine, you can use ftrace_filter_write() for the write 4517 * routine if @flag has FTRACE_ITER_FILTER set, or 4518 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 4519 * tracing_lseek() should be used as the lseek routine, and 4520 * release must call ftrace_regex_release(). 4521 * 4522 * Returns: 0 on success or a negative errno value on failure 4523 */ 4524 int 4525 ftrace_regex_open(struct ftrace_ops *ops, int flag, 4526 struct inode *inode, struct file *file) 4527 { 4528 struct ftrace_iterator *iter; 4529 struct ftrace_hash *hash; 4530 struct list_head *mod_head; 4531 struct trace_array *tr = ops->private; 4532 int ret = -ENOMEM; 4533 4534 ftrace_ops_init(ops); 4535 4536 if (unlikely(ftrace_disabled)) 4537 return -ENODEV; 4538 4539 if (tracing_check_open_get_tr(tr)) 4540 return -ENODEV; 4541 4542 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 4543 if (!iter) 4544 goto out; 4545 4546 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) 4547 goto out; 4548 4549 iter->ops = ops; 4550 iter->flags = flag; 4551 iter->tr = tr; 4552 4553 mutex_lock(&ops->func_hash->regex_lock); 4554 4555 if (flag & FTRACE_ITER_NOTRACE) { 4556 hash = ops->func_hash->notrace_hash; 4557 mod_head = tr ? 
&tr->mod_notrace : NULL; 4558 } else { 4559 hash = ops->func_hash->filter_hash; 4560 mod_head = tr ? &tr->mod_trace : NULL; 4561 } 4562 4563 iter->mod_list = mod_head; 4564 4565 if (file->f_mode & FMODE_WRITE) { 4566 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 4567 4568 if (file->f_flags & O_TRUNC) { 4569 iter->hash = alloc_ftrace_hash(size_bits); 4570 clear_ftrace_mod_list(mod_head); 4571 } else { 4572 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); 4573 } 4574 4575 if (!iter->hash) { 4576 trace_parser_put(&iter->parser); 4577 goto out_unlock; 4578 } 4579 } else 4580 iter->hash = hash; 4581 4582 ret = 0; 4583 4584 if (file->f_mode & FMODE_READ) { 4585 iter->pg = ftrace_pages_start; 4586 4587 ret = seq_open(file, &show_ftrace_seq_ops); 4588 if (!ret) { 4589 struct seq_file *m = file->private_data; 4590 m->private = iter; 4591 } else { 4592 /* Failed */ 4593 free_ftrace_hash(iter->hash); 4594 trace_parser_put(&iter->parser); 4595 } 4596 } else 4597 file->private_data = iter; 4598 4599 out_unlock: 4600 mutex_unlock(&ops->func_hash->regex_lock); 4601 4602 out: 4603 if (ret) { 4604 kfree(iter); 4605 if (tr) 4606 trace_array_put(tr); 4607 } 4608 4609 return ret; 4610 } 4611 4612 static int 4613 ftrace_filter_open(struct inode *inode, struct file *file) 4614 { 4615 struct ftrace_ops *ops = inode->i_private; 4616 4617 /* Checks for tracefs lockdown */ 4618 return ftrace_regex_open(ops, 4619 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, 4620 inode, file); 4621 } 4622 4623 static int 4624 ftrace_notrace_open(struct inode *inode, struct file *file) 4625 { 4626 struct ftrace_ops *ops = inode->i_private; 4627 4628 /* Checks for tracefs lockdown */ 4629 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, 4630 inode, file); 4631 } 4632 4633 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ 4634 struct ftrace_glob { 4635 char *search; 4636 unsigned len; 4637 int type; 4638 }; 4639 4640 /* 4641 * If symbols in an architecture don't correspond exactly to the user-visible 4642 * name of what they represent, it is possible to define this function to 4643 * perform the necessary adjustments. 
4644 */ 4645 char * __weak arch_ftrace_match_adjust(char *str, const char *search) 4646 { 4647 return str; 4648 } 4649 4650 static int ftrace_match(char *str, struct ftrace_glob *g) 4651 { 4652 int matched = 0; 4653 int slen; 4654 4655 str = arch_ftrace_match_adjust(str, g->search); 4656 4657 switch (g->type) { 4658 case MATCH_FULL: 4659 if (strcmp(str, g->search) == 0) 4660 matched = 1; 4661 break; 4662 case MATCH_FRONT_ONLY: 4663 if (strncmp(str, g->search, g->len) == 0) 4664 matched = 1; 4665 break; 4666 case MATCH_MIDDLE_ONLY: 4667 if (strstr(str, g->search)) 4668 matched = 1; 4669 break; 4670 case MATCH_END_ONLY: 4671 slen = strlen(str); 4672 if (slen >= g->len && 4673 memcmp(str + slen - g->len, g->search, g->len) == 0) 4674 matched = 1; 4675 break; 4676 case MATCH_GLOB: 4677 if (glob_match(g->search, str)) 4678 matched = 1; 4679 break; 4680 } 4681 4682 return matched; 4683 } 4684 4685 static int 4686 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) 4687 { 4688 struct ftrace_func_entry *entry; 4689 int ret = 0; 4690 4691 entry = ftrace_lookup_ip(hash, rec->ip); 4692 if (clear_filter) { 4693 /* Do nothing if it doesn't exist */ 4694 if (!entry) 4695 return 0; 4696 4697 free_hash_entry(hash, entry); 4698 } else { 4699 /* Do nothing if it exists */ 4700 if (entry) 4701 return 0; 4702 if (add_hash_entry(hash, rec->ip) == NULL) 4703 ret = -ENOMEM; 4704 } 4705 return ret; 4706 } 4707 4708 static int 4709 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, 4710 int clear_filter) 4711 { 4712 long index; 4713 struct ftrace_page *pg; 4714 struct dyn_ftrace *rec; 4715 4716 /* The index starts at 1 */ 4717 if (kstrtoul(func_g->search, 0, &index) || --index < 0) 4718 return 0; 4719 4720 do_for_each_ftrace_rec(pg, rec) { 4721 if (pg->index <= index) { 4722 index -= pg->index; 4723 /* this is a double loop, break goes to the next page */ 4724 break; 4725 } 4726 rec = &pg->records[index]; 4727 enter_record(hash, rec, clear_filter); 4728 return 1; 4729 } while_for_each_ftrace_rec(); 4730 return 0; 4731 } 4732 4733 #ifdef FTRACE_MCOUNT_MAX_OFFSET 4734 static int lookup_ip(unsigned long ip, char **modname, char *str) 4735 { 4736 unsigned long offset; 4737 4738 kallsyms_lookup(ip, NULL, &offset, modname, str); 4739 if (offset > FTRACE_MCOUNT_MAX_OFFSET) 4740 return -1; 4741 return 0; 4742 } 4743 #else 4744 static int lookup_ip(unsigned long ip, char **modname, char *str) 4745 { 4746 kallsyms_lookup(ip, NULL, NULL, modname, str); 4747 return 0; 4748 } 4749 #endif 4750 4751 static int 4752 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, 4753 struct ftrace_glob *mod_g, int exclude_mod) 4754 { 4755 char str[KSYM_SYMBOL_LEN]; 4756 char *modname; 4757 4758 if (lookup_ip(rec->ip, &modname, str)) { 4759 /* This should only happen when a rec is disabled */ 4760 WARN_ON_ONCE(system_state == SYSTEM_RUNNING && 4761 !(rec->flags & FTRACE_FL_DISABLED)); 4762 return 0; 4763 } 4764 4765 if (mod_g) { 4766 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; 4767 4768 /* blank module name to match all modules */ 4769 if (!mod_g->len) { 4770 /* blank module globbing: modname xor exclude_mod */ 4771 if (!exclude_mod != !modname) 4772 goto func_match; 4773 return 0; 4774 } 4775 4776 /* 4777 * exclude_mod is set to trace everything but the given 4778 * module. If it is set and the module matches, then 4779 * return 0. If it is not set, and the module doesn't match 4780 * also return 0. 
Otherwise, check the function to see if 4781 * that matches. 4782 */ 4783 if (!mod_matches == !exclude_mod) 4784 return 0; 4785 func_match: 4786 /* blank search means to match all funcs in the mod */ 4787 if (!func_g->len) 4788 return 1; 4789 } 4790 4791 return ftrace_match(str, func_g); 4792 } 4793 4794 static int 4795 match_records(struct ftrace_hash *hash, char *func, int len, char *mod) 4796 { 4797 struct ftrace_page *pg; 4798 struct dyn_ftrace *rec; 4799 struct ftrace_glob func_g = { .type = MATCH_FULL }; 4800 struct ftrace_glob mod_g = { .type = MATCH_FULL }; 4801 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL; 4802 int exclude_mod = 0; 4803 int found = 0; 4804 int ret; 4805 int clear_filter = 0; 4806 4807 if (func) { 4808 func_g.type = filter_parse_regex(func, len, &func_g.search, 4809 &clear_filter); 4810 func_g.len = strlen(func_g.search); 4811 } 4812 4813 if (mod) { 4814 mod_g.type = filter_parse_regex(mod, strlen(mod), 4815 &mod_g.search, &exclude_mod); 4816 mod_g.len = strlen(mod_g.search); 4817 } 4818 4819 guard(mutex)(&ftrace_lock); 4820 4821 if (unlikely(ftrace_disabled)) 4822 return 0; 4823 4824 if (func_g.type == MATCH_INDEX) 4825 return add_rec_by_index(hash, &func_g, clear_filter); 4826 4827 do_for_each_ftrace_rec(pg, rec) { 4828 4829 if (rec->flags & FTRACE_FL_DISABLED) 4830 continue; 4831 4832 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { 4833 ret = enter_record(hash, rec, clear_filter); 4834 if (ret < 0) 4835 return ret; 4836 found = 1; 4837 } 4838 cond_resched(); 4839 } while_for_each_ftrace_rec(); 4840 4841 return found; 4842 } 4843 4844 static int 4845 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) 4846 { 4847 return match_records(hash, buff, len, NULL); 4848 } 4849 4850 static void ftrace_ops_update_code(struct ftrace_ops *ops, 4851 struct ftrace_ops_hash *old_hash) 4852 { 4853 struct ftrace_ops *op; 4854 4855 if (!ftrace_enabled) 4856 return; 4857 4858 if (ops->flags & FTRACE_OPS_FL_ENABLED) { 4859 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); 4860 return; 4861 } 4862 4863 /* 4864 * If this is the shared global_ops filter, then we need to 4865 * check if there is another ops that shares it, is enabled. 4866 * If so, we still need to run the modify code. 4867 */ 4868 if (ops->func_hash != &global_ops.local_hash) 4869 return; 4870 4871 do_for_each_ftrace_op(op, ftrace_ops_list) { 4872 if (op->func_hash == &global_ops.local_hash && 4873 op->flags & FTRACE_OPS_FL_ENABLED) { 4874 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); 4875 /* Only need to do this once */ 4876 return; 4877 } 4878 } while_for_each_ftrace_op(op); 4879 } 4880 4881 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, 4882 struct ftrace_hash **orig_hash, 4883 struct ftrace_hash *hash, 4884 int enable) 4885 { 4886 if (ops->flags & FTRACE_OPS_FL_SUBOP) 4887 return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable); 4888 4889 /* 4890 * If this ops is not enabled, it could be sharing its filters 4891 * with a subop. If that's the case, update the subop instead of 4892 * this ops. Shared filters are only allowed to have one ops set 4893 * at a time, and if we update the ops that is not enabled, 4894 * it will not affect subops that share it. 
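 * (The scan below walks every registered ops' subop_list looking for
 * an enabled subop that shares this ops' func_hash, and updates that
 * subop instead.)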
4895 */ 4896 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) { 4897 struct ftrace_ops *op; 4898 4899 /* Check if any other manager subops maps to this hash */ 4900 do_for_each_ftrace_op(op, ftrace_ops_list) { 4901 struct ftrace_ops *subops; 4902 4903 list_for_each_entry(subops, &op->subop_list, list) { 4904 if ((subops->flags & FTRACE_OPS_FL_ENABLED) && 4905 subops->func_hash == ops->func_hash) { 4906 return ftrace_hash_move_and_update_subops(subops, orig_hash, hash, enable); 4907 } 4908 } 4909 } while_for_each_ftrace_op(op); 4910 } 4911 4912 return __ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); 4913 } 4914 4915 static bool module_exists(const char *module) 4916 { 4917 /* All modules have the symbol __this_module */ 4918 static const char this_mod[] = "__this_module"; 4919 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; 4920 unsigned long val; 4921 int n; 4922 4923 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); 4924 4925 if (n > sizeof(modname) - 1) 4926 return false; 4927 4928 val = module_kallsyms_lookup_name(modname); 4929 return val != 0; 4930 } 4931 4932 static int cache_mod(struct trace_array *tr, 4933 const char *func, char *module, int enable) 4934 { 4935 struct ftrace_mod_load *ftrace_mod, *n; 4936 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; 4937 4938 guard(mutex)(&ftrace_lock); 4939 4940 /* We do not cache inverse filters */ 4941 if (func[0] == '!') { 4942 int ret = -EINVAL; 4943 4944 func++; 4945 4946 /* Look to remove this hash */ 4947 list_for_each_entry_safe(ftrace_mod, n, head, list) { 4948 if (strcmp(ftrace_mod->module, module) != 0) 4949 continue; 4950 4951 /* no func matches all */ 4952 if (strcmp(func, "*") == 0 || 4953 (ftrace_mod->func && 4954 strcmp(ftrace_mod->func, func) == 0)) { 4955 ret = 0; 4956 free_ftrace_mod(ftrace_mod); 4957 continue; 4958 } 4959 } 4960 return ret; 4961 } 4962 4963 /* We only care about modules that have not been loaded yet */ 4964 if (module_exists(module)) 4965 return -EINVAL; 4966 4967 /* Save this string off, and execute it when the module is loaded */ 4968 return ftrace_add_mod(tr, func, module, enable); 4969 } 4970 4971 #ifdef CONFIG_MODULES 4972 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, 4973 char *mod, bool enable) 4974 { 4975 struct ftrace_mod_load *ftrace_mod, *n; 4976 struct ftrace_hash **orig_hash, *new_hash; 4977 LIST_HEAD(process_mods); 4978 char *func; 4979 4980 mutex_lock(&ops->func_hash->regex_lock); 4981 4982 if (enable) 4983 orig_hash = &ops->func_hash->filter_hash; 4984 else 4985 orig_hash = &ops->func_hash->notrace_hash; 4986 4987 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, 4988 *orig_hash); 4989 if (!new_hash) 4990 goto out; /* warn? */ 4991 4992 mutex_lock(&ftrace_lock); 4993 4994 list_for_each_entry_safe(ftrace_mod, n, head, list) { 4995 4996 if (strcmp(ftrace_mod->module, mod) != 0) 4997 continue; 4998 4999 if (ftrace_mod->func) 5000 func = kstrdup(ftrace_mod->func, GFP_KERNEL); 5001 else 5002 func = kstrdup("*", GFP_KERNEL); 5003 5004 if (!func) /* warn? 
*/ 5005 continue; 5006 5007 list_move(&ftrace_mod->list, &process_mods); 5008 5009 /* Use the newly allocated func, as it may be "*" */ 5010 kfree(ftrace_mod->func); 5011 ftrace_mod->func = func; 5012 } 5013 5014 mutex_unlock(&ftrace_lock); 5015 5016 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { 5017 5018 func = ftrace_mod->func; 5019 5020 /* Grabs ftrace_lock, which is why we have this extra step */ 5021 match_records(new_hash, func, strlen(func), mod); 5022 free_ftrace_mod(ftrace_mod); 5023 } 5024 5025 if (enable && list_empty(head)) 5026 new_hash->flags &= ~FTRACE_HASH_FL_MOD; 5027 5028 mutex_lock(&ftrace_lock); 5029 5030 ftrace_hash_move_and_update_ops(ops, orig_hash, 5031 new_hash, enable); 5032 mutex_unlock(&ftrace_lock); 5033 5034 out: 5035 mutex_unlock(&ops->func_hash->regex_lock); 5036 5037 free_ftrace_hash(new_hash); 5038 } 5039 5040 static void process_cached_mods(const char *mod_name) 5041 { 5042 struct trace_array *tr; 5043 char *mod; 5044 5045 mod = kstrdup(mod_name, GFP_KERNEL); 5046 if (!mod) 5047 return; 5048 5049 mutex_lock(&trace_types_lock); 5050 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 5051 if (!list_empty(&tr->mod_trace)) 5052 process_mod_list(&tr->mod_trace, tr->ops, mod, true); 5053 if (!list_empty(&tr->mod_notrace)) 5054 process_mod_list(&tr->mod_notrace, tr->ops, mod, false); 5055 } 5056 mutex_unlock(&trace_types_lock); 5057 5058 kfree(mod); 5059 } 5060 #endif 5061 5062 /* 5063 * We register the module command as a template to show others how 5064 * to register a command as well. 5065 */ 5066 5067 static int 5068 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, 5069 char *func_orig, char *cmd, char *module, int enable) 5070 { 5071 char *func; 5072 int ret; 5073 5074 if (!tr) 5075 return -ENODEV; 5076 5077 /* match_records() modifies func, and we need the original */ 5078 func = kstrdup(func_orig, GFP_KERNEL); 5079 if (!func) 5080 return -ENOMEM; 5081 5082 /* 5083 * cmd == 'mod' because we only registered this func 5084 * for the 'mod' ftrace_func_command. 5085 * But if you register one func with multiple commands, 5086 * you can tell which command was used by the cmd 5087 * parameter. 5088 */ 5089 ret = match_records(hash, func, strlen(func), module); 5090 kfree(func); 5091 5092 if (!ret) 5093 return cache_mod(tr, func_orig, module, enable); 5094 if (ret < 0) 5095 return ret; 5096 return 0; 5097 } 5098 5099 static struct ftrace_func_command ftrace_mod_cmd = { 5100 .name = "mod", 5101 .func = ftrace_mod_callback, 5102 }; 5103 5104 static int __init ftrace_mod_cmd_init(void) 5105 { 5106 return register_ftrace_command(&ftrace_mod_cmd); 5107 } 5108 core_initcall(ftrace_mod_cmd_init); 5109 5110 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, 5111 struct ftrace_ops *op, struct ftrace_regs *fregs) 5112 { 5113 struct ftrace_probe_ops *probe_ops; 5114 struct ftrace_func_probe *probe; 5115 5116 probe = container_of(op, struct ftrace_func_probe, ops); 5117 probe_ops = probe->probe_ops; 5118 5119 /* 5120 * Disable preemption for these calls to prevent an RCU grace 5121 * period. This syncs the hash iteration and freeing of items 5122 * on the hash. rcu_read_lock is too dangerous here.
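 * (With the consolidated RCU flavors, a preempt-disabled region is a
 * valid read-side critical section, so the grace period the freeing
 * side waits on cannot complete while this callback runs.)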
5123 */ 5124 preempt_disable_notrace(); 5125 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); 5126 preempt_enable_notrace(); 5127 } 5128 5129 struct ftrace_func_map { 5130 struct ftrace_func_entry entry; 5131 void *data; 5132 }; 5133 5134 struct ftrace_func_mapper { 5135 struct ftrace_hash hash; 5136 }; 5137 5138 /** 5139 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper 5140 * 5141 * Returns: an ftrace_func_mapper descriptor that can be used to map ips to data. 5142 */ 5143 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void) 5144 { 5145 struct ftrace_hash *hash; 5146 5147 /* 5148 * The mapper is simply an ftrace_hash, but since the entries 5149 * in the hash are not ftrace_func_entry type, we define it 5150 * as a separate structure. 5151 */ 5152 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 5153 return (struct ftrace_func_mapper *)hash; 5154 } 5155 5156 /** 5157 * ftrace_func_mapper_find_ip - Find some data mapped to an ip 5158 * @mapper: The mapper that has the ip maps 5159 * @ip: the instruction pointer to find the data for 5160 * 5161 * Returns: the data mapped to @ip if found, otherwise NULL. The return 5162 * is actually the address of the mapper data pointer. The address is 5163 * returned for use cases where the data is no bigger than a long, and 5164 * the user can use the data pointer as its data instead of having to 5165 * allocate more memory for the reference. 5166 */ 5167 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, 5168 unsigned long ip) 5169 { 5170 struct ftrace_func_entry *entry; 5171 struct ftrace_func_map *map; 5172 5173 entry = ftrace_lookup_ip(&mapper->hash, ip); 5174 if (!entry) 5175 return NULL; 5176 5177 map = (struct ftrace_func_map *)entry; 5178 return &map->data; 5179 } 5180 5181 /** 5182 * ftrace_func_mapper_add_ip - Map some data to an ip 5183 * @mapper: The mapper that has the ip maps 5184 * @ip: The instruction pointer address to map @data to 5185 * @data: The data to map to @ip 5186 * 5187 * Returns: 0 on success otherwise an error. 5188 */ 5189 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, 5190 unsigned long ip, void *data) 5191 { 5192 struct ftrace_func_entry *entry; 5193 struct ftrace_func_map *map; 5194 5195 entry = ftrace_lookup_ip(&mapper->hash, ip); 5196 if (entry) 5197 return -EBUSY; 5198 5199 map = kmalloc(sizeof(*map), GFP_KERNEL); 5200 if (!map) 5201 return -ENOMEM; 5202 5203 map->entry.ip = ip; 5204 map->data = data; 5205 5206 __add_hash_entry(&mapper->hash, &map->entry); 5207 5208 return 0; 5209 } 5210 5211 /** 5212 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping 5213 * @mapper: The mapper that has the ip maps 5214 * @ip: The instruction pointer address to remove the data from 5215 * 5216 * Returns: the data if it is found, otherwise NULL. 5217 * Note, if the data pointer is used as the data itself (see 5218 * ftrace_func_mapper_find_ip()), then the return value may be meaningless 5219 * if the data pointer was set to zero.
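 *
 * An illustrative (untested) sketch of the mapper API as a whole:
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	ftrace_func_mapper_add_ip(mapper, ip, data);
 *	datap = ftrace_func_mapper_find_ip(mapper, ip);
 *	data = ftrace_func_mapper_remove_ip(mapper, ip);
 *	free_ftrace_func_mapper(mapper, free_func);
 *
 * where free_func stands for whatever optional callback the user
 * supplies to release each remaining data item (NULL if nothing needs
 * freeing).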
5220 */ 5221 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, 5222 unsigned long ip) 5223 { 5224 struct ftrace_func_entry *entry; 5225 struct ftrace_func_map *map; 5226 void *data; 5227 5228 entry = ftrace_lookup_ip(&mapper->hash, ip); 5229 if (!entry) 5230 return NULL; 5231 5232 map = (struct ftrace_func_map *)entry; 5233 data = map->data; 5234 5235 remove_hash_entry(&mapper->hash, entry); 5236 kfree(entry); 5237 5238 return data; 5239 } 5240 5241 /** 5242 * free_ftrace_func_mapper - free a mapping of ips and data 5243 * @mapper: The mapper that has the ip maps 5244 * @free_func: A function to be called on each data item. 5245 * 5246 * This is used to free the function mapper. The @free_func is optional 5247 * and can be used if the data needs to be freed as well. 5248 */ 5249 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, 5250 ftrace_mapper_func free_func) 5251 { 5252 struct ftrace_func_entry *entry; 5253 struct ftrace_func_map *map; 5254 struct hlist_head *hhd; 5255 int size, i; 5256 5257 if (!mapper) 5258 return; 5259 5260 if (free_func && mapper->hash.count) { 5261 size = 1 << mapper->hash.size_bits; 5262 for (i = 0; i < size; i++) { 5263 hhd = &mapper->hash.buckets[i]; 5264 hlist_for_each_entry(entry, hhd, hlist) { 5265 map = (struct ftrace_func_map *)entry; 5266 free_func(map); 5267 } 5268 } 5269 } 5270 free_ftrace_hash(&mapper->hash); 5271 } 5272 5273 static void release_probe(struct ftrace_func_probe *probe) 5274 { 5275 struct ftrace_probe_ops *probe_ops; 5276 5277 guard(mutex)(&ftrace_lock); 5278 5279 WARN_ON(probe->ref <= 0); 5280 5281 /* Subtract the ref that was used to protect this instance */ 5282 probe->ref--; 5283 5284 if (!probe->ref) { 5285 probe_ops = probe->probe_ops; 5286 /* 5287 * Sending zero as ip tells probe_ops to free 5288 * the probe->data itself 5289 */ 5290 if (probe_ops->free) 5291 probe_ops->free(probe_ops, probe->tr, 0, probe->data); 5292 list_del(&probe->list); 5293 kfree(probe); 5294 } 5295 } 5296 5297 static void acquire_probe_locked(struct ftrace_func_probe *probe) 5298 { 5299 /* 5300 * Add one ref to keep it from being freed when releasing the 5301 * ftrace_lock mutex. 5302 */ 5303 probe->ref++; 5304 } 5305 5306 int 5307 register_ftrace_function_probe(char *glob, struct trace_array *tr, 5308 struct ftrace_probe_ops *probe_ops, 5309 void *data) 5310 { 5311 struct ftrace_func_probe *probe = NULL, *iter; 5312 struct ftrace_func_entry *entry; 5313 struct ftrace_hash **orig_hash; 5314 struct ftrace_hash *old_hash; 5315 struct ftrace_hash *hash; 5316 int count = 0; 5317 int size; 5318 int ret; 5319 int i; 5320 5321 if (WARN_ON(!tr)) 5322 return -EINVAL; 5323 5324 /* We do not support '!' 
for function probes */ 5325 if (WARN_ON(glob[0] == '!')) 5326 return -EINVAL; 5327 5328 5329 mutex_lock(&ftrace_lock); 5330 /* Check if the probe_ops is already registered */ 5331 list_for_each_entry(iter, &tr->func_probes, list) { 5332 if (iter->probe_ops == probe_ops) { 5333 probe = iter; 5334 break; 5335 } 5336 } 5337 if (!probe) { 5338 probe = kzalloc(sizeof(*probe), GFP_KERNEL); 5339 if (!probe) { 5340 mutex_unlock(&ftrace_lock); 5341 return -ENOMEM; 5342 } 5343 probe->probe_ops = probe_ops; 5344 probe->ops.func = function_trace_probe_call; 5345 probe->tr = tr; 5346 ftrace_ops_init(&probe->ops); 5347 list_add(&probe->list, &tr->func_probes); 5348 } 5349 5350 acquire_probe_locked(probe); 5351 5352 mutex_unlock(&ftrace_lock); 5353 5354 /* 5355 * Note, there's a small window here that the func_hash->filter_hash 5356 * may be NULL or empty. Need to be careful when reading the loop. 5357 */ 5358 mutex_lock(&probe->ops.func_hash->regex_lock); 5359 5360 orig_hash = &probe->ops.func_hash->filter_hash; 5361 old_hash = *orig_hash; 5362 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 5363 5364 if (!hash) { 5365 ret = -ENOMEM; 5366 goto out; 5367 } 5368 5369 ret = ftrace_match_records(hash, glob, strlen(glob)); 5370 5371 /* Nothing found? */ 5372 if (!ret) 5373 ret = -EINVAL; 5374 5375 if (ret < 0) 5376 goto out; 5377 5378 size = 1 << hash->size_bits; 5379 for (i = 0; i < size; i++) { 5380 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 5381 if (ftrace_lookup_ip(old_hash, entry->ip)) 5382 continue; 5383 /* 5384 * The caller might want to do something special 5385 * for each function we find. We call the callback 5386 * to give the caller an opportunity to do so. 5387 */ 5388 if (probe_ops->init) { 5389 ret = probe_ops->init(probe_ops, tr, 5390 entry->ip, data, 5391 &probe->data); 5392 if (ret < 0) { 5393 if (probe_ops->free && count) 5394 probe_ops->free(probe_ops, tr, 5395 0, probe->data); 5396 probe->data = NULL; 5397 goto out; 5398 } 5399 } 5400 count++; 5401 } 5402 } 5403 5404 mutex_lock(&ftrace_lock); 5405 5406 if (!count) { 5407 /* Nothing was added? 
*/ 5408 ret = -EINVAL; 5409 goto out_unlock; 5410 } 5411 5412 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 5413 hash, 1); 5414 if (ret < 0) 5415 goto err_unlock; 5416 5417 /* One ref for each new function traced */ 5418 probe->ref += count; 5419 5420 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) 5421 ret = ftrace_startup(&probe->ops, 0); 5422 5423 out_unlock: 5424 mutex_unlock(&ftrace_lock); 5425 5426 if (!ret) 5427 ret = count; 5428 out: 5429 mutex_unlock(&probe->ops.func_hash->regex_lock); 5430 free_ftrace_hash(hash); 5431 5432 release_probe(probe); 5433 5434 return ret; 5435 5436 err_unlock: 5437 if (!probe_ops->free || !count) 5438 goto out_unlock; 5439 5440 /* Failed to do the move, need to call the free functions */ 5441 for (i = 0; i < size; i++) { 5442 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 5443 if (ftrace_lookup_ip(old_hash, entry->ip)) 5444 continue; 5445 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 5446 } 5447 } 5448 goto out_unlock; 5449 } 5450 5451 int 5452 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, 5453 struct ftrace_probe_ops *probe_ops) 5454 { 5455 struct ftrace_func_probe *probe = NULL, *iter; 5456 struct ftrace_ops_hash old_hash_ops; 5457 struct ftrace_func_entry *entry; 5458 struct ftrace_glob func_g; 5459 struct ftrace_hash **orig_hash; 5460 struct ftrace_hash *old_hash; 5461 struct ftrace_hash *hash = NULL; 5462 struct hlist_node *tmp; 5463 struct hlist_head hhd; 5464 char str[KSYM_SYMBOL_LEN]; 5465 int count = 0; 5466 int i, ret = -ENODEV; 5467 int size; 5468 5469 if (!glob || !strlen(glob) || !strcmp(glob, "*")) 5470 func_g.search = NULL; 5471 else { 5472 int not; 5473 5474 func_g.type = filter_parse_regex(glob, strlen(glob), 5475 &func_g.search, ¬); 5476 func_g.len = strlen(func_g.search); 5477 5478 /* we do not support '!' for function probes */ 5479 if (WARN_ON(not)) 5480 return -EINVAL; 5481 } 5482 5483 mutex_lock(&ftrace_lock); 5484 /* Check if the probe_ops is already registered */ 5485 list_for_each_entry(iter, &tr->func_probes, list) { 5486 if (iter->probe_ops == probe_ops) { 5487 probe = iter; 5488 break; 5489 } 5490 } 5491 if (!probe) 5492 goto err_unlock_ftrace; 5493 5494 ret = -EINVAL; 5495 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) 5496 goto err_unlock_ftrace; 5497 5498 acquire_probe_locked(probe); 5499 5500 mutex_unlock(&ftrace_lock); 5501 5502 mutex_lock(&probe->ops.func_hash->regex_lock); 5503 5504 orig_hash = &probe->ops.func_hash->filter_hash; 5505 old_hash = *orig_hash; 5506 5507 if (ftrace_hash_empty(old_hash)) 5508 goto out_unlock; 5509 5510 old_hash_ops.filter_hash = old_hash; 5511 /* Probes only have filters */ 5512 old_hash_ops.notrace_hash = NULL; 5513 5514 ret = -ENOMEM; 5515 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 5516 if (!hash) 5517 goto out_unlock; 5518 5519 INIT_HLIST_HEAD(&hhd); 5520 5521 size = 1 << hash->size_bits; 5522 for (i = 0; i < size; i++) { 5523 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { 5524 5525 if (func_g.search) { 5526 kallsyms_lookup(entry->ip, NULL, NULL, 5527 NULL, str); 5528 if (!ftrace_match(str, &func_g)) 5529 continue; 5530 } 5531 count++; 5532 remove_hash_entry(hash, entry); 5533 hlist_add_head(&entry->hlist, &hhd); 5534 } 5535 } 5536 5537 /* Nothing found? 
*/ 5538 if (!count) { 5539 ret = -EINVAL; 5540 goto out_unlock; 5541 } 5542 5543 mutex_lock(&ftrace_lock); 5544 5545 WARN_ON(probe->ref < count); 5546 5547 probe->ref -= count; 5548 5549 if (ftrace_hash_empty(hash)) 5550 ftrace_shutdown(&probe->ops, 0); 5551 5552 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 5553 hash, 1); 5554 5555 /* still need to update the function call sites */ 5556 if (ftrace_enabled && !ftrace_hash_empty(hash)) 5557 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, 5558 &old_hash_ops); 5559 synchronize_rcu(); 5560 5561 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { 5562 hlist_del(&entry->hlist); 5563 if (probe_ops->free) 5564 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 5565 kfree(entry); 5566 } 5567 mutex_unlock(&ftrace_lock); 5568 5569 out_unlock: 5570 mutex_unlock(&probe->ops.func_hash->regex_lock); 5571 free_ftrace_hash(hash); 5572 5573 release_probe(probe); 5574 5575 return ret; 5576 5577 err_unlock_ftrace: 5578 mutex_unlock(&ftrace_lock); 5579 return ret; 5580 } 5581 5582 void clear_ftrace_function_probes(struct trace_array *tr) 5583 { 5584 struct ftrace_func_probe *probe, *n; 5585 5586 list_for_each_entry_safe(probe, n, &tr->func_probes, list) 5587 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); 5588 } 5589 5590 static LIST_HEAD(ftrace_commands); 5591 static DEFINE_MUTEX(ftrace_cmd_mutex); 5592 5593 /* 5594 * Currently we only register ftrace commands from __init, so mark this 5595 * __init too. 5596 */ 5597 __init int register_ftrace_command(struct ftrace_func_command *cmd) 5598 { 5599 struct ftrace_func_command *p; 5600 5601 guard(mutex)(&ftrace_cmd_mutex); 5602 list_for_each_entry(p, &ftrace_commands, list) { 5603 if (strcmp(cmd->name, p->name) == 0) 5604 return -EBUSY; 5605 } 5606 list_add(&cmd->list, &ftrace_commands); 5607 5608 return 0; 5609 } 5610 5611 /* 5612 * Currently we only unregister ftrace commands from __init, so mark 5613 * this __init too. 
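 *
 * An illustrative (hypothetical) command mirroring ftrace_mod_cmd above,
 * where my_cmd and my_cmd_callback are made-up names:
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name = "mycmd",
 *		.func = my_cmd_callback,
 *	};
 *
 * It would be added with register_ftrace_command(&my_cmd) from __init
 * code and removed again with unregister_ftrace_command(&my_cmd).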
5614 */ 5615 __init int unregister_ftrace_command(struct ftrace_func_command *cmd) 5616 { 5617 struct ftrace_func_command *p, *n; 5618 5619 guard(mutex)(&ftrace_cmd_mutex); 5620 5621 list_for_each_entry_safe(p, n, &ftrace_commands, list) { 5622 if (strcmp(cmd->name, p->name) == 0) { 5623 list_del_init(&p->list); 5624 return 0; 5625 } 5626 } 5627 5628 return -ENODEV; 5629 } 5630 5631 static int ftrace_process_regex(struct ftrace_iterator *iter, 5632 char *buff, int len, int enable) 5633 { 5634 struct ftrace_hash *hash = iter->hash; 5635 struct trace_array *tr = iter->ops->private; 5636 char *func, *command, *next = buff; 5637 struct ftrace_func_command *p; 5638 int ret; 5639 5640 func = strsep(&next, ":"); 5641 5642 if (!next) { 5643 ret = ftrace_match_records(hash, func, len); 5644 if (!ret) 5645 ret = -EINVAL; 5646 if (ret < 0) 5647 return ret; 5648 return 0; 5649 } 5650 5651 /* command found */ 5652 5653 command = strsep(&next, ":"); 5654 5655 guard(mutex)(&ftrace_cmd_mutex); 5656 5657 list_for_each_entry(p, &ftrace_commands, list) { 5658 if (strcmp(p->name, command) == 0) 5659 return p->func(tr, hash, func, command, next, enable); 5660 } 5661 5662 return -EINVAL; 5663 } 5664 5665 static ssize_t 5666 ftrace_regex_write(struct file *file, const char __user *ubuf, 5667 size_t cnt, loff_t *ppos, int enable) 5668 { 5669 struct ftrace_iterator *iter; 5670 struct trace_parser *parser; 5671 ssize_t ret, read; 5672 5673 if (!cnt) 5674 return 0; 5675 5676 if (file->f_mode & FMODE_READ) { 5677 struct seq_file *m = file->private_data; 5678 iter = m->private; 5679 } else 5680 iter = file->private_data; 5681 5682 if (unlikely(ftrace_disabled)) 5683 return -ENODEV; 5684 5685 /* iter->hash is a local copy, so we don't need regex_lock */ 5686 5687 parser = &iter->parser; 5688 read = trace_get_user(parser, ubuf, cnt, ppos); 5689 5690 if (read >= 0 && trace_parser_loaded(parser) && 5691 !trace_parser_cont(parser)) { 5692 ret = ftrace_process_regex(iter, parser->buffer, 5693 parser->idx, enable); 5694 trace_parser_clear(parser); 5695 if (ret < 0) 5696 return ret; 5697 } 5698 5699 return read; 5700 } 5701 5702 ssize_t 5703 ftrace_filter_write(struct file *file, const char __user *ubuf, 5704 size_t cnt, loff_t *ppos) 5705 { 5706 return ftrace_regex_write(file, ubuf, cnt, ppos, 1); 5707 } 5708 5709 ssize_t 5710 ftrace_notrace_write(struct file *file, const char __user *ubuf, 5711 size_t cnt, loff_t *ppos) 5712 { 5713 return ftrace_regex_write(file, ubuf, cnt, ppos, 0); 5714 } 5715 5716 static int 5717 __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) 5718 { 5719 struct ftrace_func_entry *entry; 5720 5721 ip = ftrace_location(ip); 5722 if (!ip) 5723 return -EINVAL; 5724 5725 if (remove) { 5726 entry = ftrace_lookup_ip(hash, ip); 5727 if (!entry) 5728 return -ENOENT; 5729 free_hash_entry(hash, entry); 5730 return 0; 5731 } 5732 5733 entry = add_hash_entry(hash, ip); 5734 return entry ? 0 : -ENOMEM; 5735 } 5736 5737 static int 5738 ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips, 5739 unsigned int cnt, int remove) 5740 { 5741 unsigned int i; 5742 int err; 5743 5744 for (i = 0; i < cnt; i++) { 5745 err = __ftrace_match_addr(hash, ips[i], remove); 5746 if (err) { 5747 /* 5748 * This expects the @hash is a temporary hash and if this 5749 * fails the caller must free the @hash. 
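 * (ftrace_set_hash() below is such a caller: it unconditionally frees
 * the temporary hash on its unlock path, whether or not the update
 * succeeded.)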
5750 */ 5751 return err; 5752 } 5753 } 5754 return 0; 5755 } 5756 5757 static int 5758 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, 5759 unsigned long *ips, unsigned int cnt, 5760 int remove, int reset, int enable, char *mod) 5761 { 5762 struct ftrace_hash **orig_hash; 5763 struct ftrace_hash *hash; 5764 int ret; 5765 5766 if (unlikely(ftrace_disabled)) 5767 return -ENODEV; 5768 5769 mutex_lock(&ops->func_hash->regex_lock); 5770 5771 if (enable) 5772 orig_hash = &ops->func_hash->filter_hash; 5773 else 5774 orig_hash = &ops->func_hash->notrace_hash; 5775 5776 if (reset) 5777 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 5778 else 5779 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 5780 5781 if (!hash) { 5782 ret = -ENOMEM; 5783 goto out_regex_unlock; 5784 } 5785 5786 if (buf && !match_records(hash, buf, len, mod)) { 5787 /* If this was for a module and nothing was enabled, flag it */ 5788 if (mod) 5789 (*orig_hash)->flags |= FTRACE_HASH_FL_MOD; 5790 5791 /* 5792 * Even if it is a mod, return an error to let the caller know 5793 * nothing was added 5794 */ 5795 ret = -EINVAL; 5796 goto out_regex_unlock; 5797 } 5798 if (ips) { 5799 ret = ftrace_match_addr(hash, ips, cnt, remove); 5800 if (ret < 0) 5801 goto out_regex_unlock; 5802 } 5803 5804 mutex_lock(&ftrace_lock); 5805 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); 5806 mutex_unlock(&ftrace_lock); 5807 5808 out_regex_unlock: 5809 mutex_unlock(&ops->func_hash->regex_lock); 5810 5811 free_ftrace_hash(hash); 5812 return ret; 5813 } 5814 5815 static int 5816 ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt, 5817 int remove, int reset, int enable) 5818 { 5819 return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable, NULL); 5820 } 5821 5822 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 5823 5824 static int register_ftrace_function_nolock(struct ftrace_ops *ops); 5825 5826 /* 5827 * If there are multiple ftrace_ops, use SAVE_REGS by default, so that the 5828 * direct call will be jumped to from ftrace_regs_caller. Only if the 5829 * architecture does not support ftrace_regs_caller but does support 5830 * direct_call, use SAVE_ARGS so that the jump is made from ftrace_caller for multiple ftrace_ops.
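 * Either way FTRACE_OPS_FL_DIRECT is set; the two variants differ only
 * in whether the full pt_regs or just the argument registers are saved
 * around the call.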
5831 */ 5832 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS 5833 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS) 5834 #else 5835 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS) 5836 #endif 5837 5838 static int check_direct_multi(struct ftrace_ops *ops) 5839 { 5840 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) 5841 return -EINVAL; 5842 if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS) 5843 return -EINVAL; 5844 return 0; 5845 } 5846 5847 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr) 5848 { 5849 struct ftrace_func_entry *entry, *del; 5850 int size, i; 5851 5852 size = 1 << hash->size_bits; 5853 for (i = 0; i < size; i++) { 5854 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 5855 del = __ftrace_lookup_ip(direct_functions, entry->ip); 5856 if (del && del->direct == addr) { 5857 remove_hash_entry(direct_functions, del); 5858 kfree(del); 5859 } 5860 } 5861 } 5862 } 5863 5864 static void register_ftrace_direct_cb(struct rcu_head *rhp) 5865 { 5866 struct ftrace_hash *fhp = container_of(rhp, struct ftrace_hash, rcu); 5867 5868 free_ftrace_hash(fhp); 5869 } 5870 5871 /** 5872 * register_ftrace_direct - Call a custom trampoline directly 5873 * for multiple functions registered in @ops 5874 * @ops: The address of the struct ftrace_ops object 5875 * @addr: The address of the trampoline to call at @ops functions 5876 * 5877 * This is used to connect direct calls to @addr from the nop locations 5878 * of the functions registered in @ops (set via the ftrace_set_filter_ip() 5879 * function). 5880 * 5881 * The location that it calls (@addr) must be able to handle a direct call, 5882 * save the parameters of the function being traced, and restore them 5883 * (or inject new ones if needed), before returning. 5884 * 5885 * Returns: 5886 * 0 on success 5887 * -EINVAL - The @ops object was already registered with this call or 5888 * when there are no functions in the @ops object. 5889 * -EBUSY - Another direct function is already attached (there can be only one) 5890 * -ENODEV - a filtered address does not point to an ftrace nop location (or is not supported) 5891 * -ENOMEM - There was an allocation failure. 5892 */ 5893 int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) 5894 { 5895 struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL; 5896 struct ftrace_func_entry *entry, *new; 5897 int err = -EBUSY, size, i; 5898 5899 if (ops->func || ops->trampoline) 5900 return -EINVAL; 5901 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) 5902 return -EINVAL; 5903 if (ops->flags & FTRACE_OPS_FL_ENABLED) 5904 return -EINVAL; 5905 5906 hash = ops->func_hash->filter_hash; 5907 if (ftrace_hash_empty(hash)) 5908 return -EINVAL; 5909 5910 mutex_lock(&direct_mutex); 5911 5912 /* Make sure requested entries are not already registered.
*/ 5913 size = 1 << hash->size_bits; 5914 for (i = 0; i < size; i++) { 5915 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 5916 if (ftrace_find_rec_direct(entry->ip)) 5917 goto out_unlock; 5918 } 5919 } 5920 5921 err = -ENOMEM; 5922 5923 /* Make a copy hash to place the new and the old entries in */ 5924 size = hash->count + direct_functions->count; 5925 if (size > 32) 5926 size = 32; 5927 new_hash = alloc_ftrace_hash(fls(size)); 5928 if (!new_hash) 5929 goto out_unlock; 5930 5931 /* Now copy over the existing direct entries */ 5932 size = 1 << direct_functions->size_bits; 5933 for (i = 0; i < size; i++) { 5934 hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) { 5935 new = add_hash_entry(new_hash, entry->ip); 5936 if (!new) 5937 goto out_unlock; 5938 new->direct = entry->direct; 5939 } 5940 } 5941 5942 /* ... and add the new entries */ 5943 size = 1 << hash->size_bits; 5944 for (i = 0; i < size; i++) { 5945 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 5946 new = add_hash_entry(new_hash, entry->ip); 5947 if (!new) 5948 goto out_unlock; 5949 /* Update both the copy and the hash entry */ 5950 new->direct = addr; 5951 entry->direct = addr; 5952 } 5953 } 5954 5955 free_hash = direct_functions; 5956 rcu_assign_pointer(direct_functions, new_hash); 5957 new_hash = NULL; 5958 5959 ops->func = call_direct_funcs; 5960 ops->flags = MULTI_FLAGS; 5961 ops->trampoline = FTRACE_REGS_ADDR; 5962 ops->direct_call = addr; 5963 5964 err = register_ftrace_function_nolock(ops); 5965 5966 out_unlock: 5967 mutex_unlock(&direct_mutex); 5968 5969 if (free_hash && free_hash != EMPTY_HASH) 5970 call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb); 5971 5972 if (new_hash) 5973 free_ftrace_hash(new_hash); 5974 5975 return err; 5976 } 5977 EXPORT_SYMBOL_GPL(register_ftrace_direct); 5978 5979 /** 5980 * unregister_ftrace_direct - Remove calls to a custom trampoline 5981 * previously registered by register_ftrace_direct() for the @ops object. 5982 * @ops: The address of the struct ftrace_ops object 5983 * @addr: The address of the direct function that is called by the @ops functions 5984 * @free_filters: Set to true to remove all filters for the ftrace_ops, false otherwise 5985 * 5986 * This is used to remove the direct calls to @addr from the nop locations 5987 * of the functions registered in @ops (set via the ftrace_set_filter_ip() 5988 * function). 5989 * 5990 * Returns: 5991 * 0 on success 5992 * -EINVAL - The @ops object was not properly registered.
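 *
 * An illustrative (untested) lifecycle sketch, where my_tramp stands in
 * for an architecture-suitable trampoline:
 *
 *	ftrace_set_filter_ip(&ops, ip, 0, 0);
 *	register_ftrace_direct(&ops, (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct(&ops, (unsigned long)my_tramp, true);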
5993 */ 5994 int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, 5995 bool free_filters) 5996 { 5997 struct ftrace_hash *hash = ops->func_hash->filter_hash; 5998 int err; 5999 6000 if (check_direct_multi(ops)) 6001 return -EINVAL; 6002 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 6003 return -EINVAL; 6004 6005 mutex_lock(&direct_mutex); 6006 err = unregister_ftrace_function(ops); 6007 remove_direct_functions_hash(hash, addr); 6008 mutex_unlock(&direct_mutex); 6009 6010 /* clean up for a possible subsequent register call */ 6011 ops->func = NULL; 6012 ops->trampoline = 0; 6013 6014 if (free_filters) 6015 ftrace_free_filter(ops); 6016 return err; 6017 } 6018 EXPORT_SYMBOL_GPL(unregister_ftrace_direct); 6019 6020 static int 6021 __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) 6022 { 6023 struct ftrace_hash *hash; 6024 struct ftrace_func_entry *entry, *iter; 6025 static struct ftrace_ops tmp_ops = { 6026 .func = ftrace_stub, 6027 .flags = FTRACE_OPS_FL_STUB, 6028 }; 6029 int i, size; 6030 int err; 6031 6032 lockdep_assert_held_once(&direct_mutex); 6033 6034 /* Enable the tmp_ops to have the same functions as the direct ops */ 6035 ftrace_ops_init(&tmp_ops); 6036 tmp_ops.func_hash = ops->func_hash; 6037 tmp_ops.direct_call = addr; 6038 6039 err = register_ftrace_function_nolock(&tmp_ops); 6040 if (err) 6041 return err; 6042 6043 /* 6044 * Now ftrace_ops_list_func() is called to handle the direct callers. 6045 * We can safely change the direct functions attached to each entry. 6046 */ 6047 mutex_lock(&ftrace_lock); 6048 6049 hash = ops->func_hash->filter_hash; 6050 size = 1 << hash->size_bits; 6051 for (i = 0; i < size; i++) { 6052 hlist_for_each_entry(iter, &hash->buckets[i], hlist) { 6053 entry = __ftrace_lookup_ip(direct_functions, iter->ip); 6054 if (!entry) 6055 continue; 6056 entry->direct = addr; 6057 } 6058 } 6059 /* Prevent store tearing if a trampoline concurrently accesses the value */ 6060 WRITE_ONCE(ops->direct_call, addr); 6061 6062 mutex_unlock(&ftrace_lock); 6063 6064 /* Removing the tmp_ops will add the updated direct callers to the functions */ 6065 unregister_ftrace_function(&tmp_ops); 6066 6067 return err; 6068 } 6069 6070 /** 6071 * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call 6072 * to call something else 6073 * @ops: The address of the struct ftrace_ops object 6074 * @addr: The address of the new trampoline to call at @ops functions 6075 * 6076 * This is used to unregister the currently registered direct caller and 6077 * register a new one (@addr) on the functions registered in @ops. 6078 * 6079 * Note there's a window between the ftrace_shutdown and ftrace_startup calls 6080 * where no callbacks will be called. 6081 * 6082 * The caller should already have direct_mutex locked, so we don't lock 6083 * direct_mutex here. 6084 * 6085 * Returns: zero on success. Non-zero on error, which includes: 6086 * -EINVAL - The @ops object was not properly registered.
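 *
 * An illustrative (untested) sketch, assuming the caller already holds
 * direct_mutex and my_new_tramp is a hypothetical replacement trampoline:
 *
 *	err = modify_ftrace_direct_nolock(&ops, (unsigned long)my_new_tramp);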
6087 */ 6088 int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr) 6089 { 6090 if (check_direct_multi(ops)) 6091 return -EINVAL; 6092 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 6093 return -EINVAL; 6094 6095 return __modify_ftrace_direct(ops, addr); 6096 } 6097 EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock); 6098 6099 /** 6100 * modify_ftrace_direct - Modify an existing direct 'multi' call 6101 * to call something else 6102 * @ops: The address of the struct ftrace_ops object 6103 * @addr: The address of the new trampoline to call at @ops functions 6104 * 6105 * This is used to unregister the currently registered direct caller and 6106 * register a new one (@addr) on the functions registered in @ops. 6107 * 6108 * Note there's a window between the ftrace_shutdown and ftrace_startup calls 6109 * where no callbacks will be called. 6110 * 6111 * Returns: zero on success. Non-zero on error, which includes: 6112 * -EINVAL - The @ops object was not properly registered. 6113 */ 6114 int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) 6115 { 6116 int err; 6117 6118 if (check_direct_multi(ops)) 6119 return -EINVAL; 6120 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 6121 return -EINVAL; 6122 6123 mutex_lock(&direct_mutex); 6124 err = __modify_ftrace_direct(ops, addr); 6125 mutex_unlock(&direct_mutex); 6126 return err; 6127 } 6128 EXPORT_SYMBOL_GPL(modify_ftrace_direct); 6129 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 6130 6131 /** 6132 * ftrace_set_filter_ip - set a function to filter on in ftrace by address 6133 * @ops: the ops to set the filter with 6134 * @ip: the address to add to or remove from the filter. 6135 * @remove: non zero to remove the ip from the filter 6136 * @reset: non zero to reset all filters before applying this filter. 6137 * 6138 * Filters denote which functions should be enabled when tracing is enabled. 6139 * If @ip is 0, it fails to update the filter. 6140 * 6141 * This can allocate memory which must be freed before @ops can be freed, 6142 * either by removing each filtered addr or by using 6143 * ftrace_free_filter(@ops). 6144 */ 6145 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, 6146 int remove, int reset) 6147 { 6148 ftrace_ops_init(ops); 6149 return ftrace_set_addr(ops, &ip, 1, remove, reset, 1); 6150 } 6151 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); 6152 6153 /** 6154 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses 6155 * @ops: the ops to set the filter with 6156 * @ips: the array of addresses to add to or remove from the filter. 6157 * @cnt: the number of addresses in @ips 6158 * @remove: non zero to remove ips from the filter 6159 * @reset: non zero to reset all filters before applying this filter. 6160 * 6161 * Filters denote which functions should be enabled when tracing is enabled. 6162 * If the @ips array or any ip specified within is 0, it fails to update the filter. 6163 * 6164 * This can allocate memory which must be freed before @ops can be freed, 6165 * either by removing each filtered addr or by using 6166 * ftrace_free_filter(@ops).
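 *
 * An illustrative (untested) sketch filtering two already-known addresses:
 *
 *	unsigned long ips[2] = { ip1, ip2 };
 *
 *	ftrace_set_filter_ips(&ops, ips, 2, 0, 1);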
6167 */ 6168 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, 6169 unsigned int cnt, int remove, int reset) 6170 { 6171 ftrace_ops_init(ops); 6172 return ftrace_set_addr(ops, ips, cnt, remove, reset, 1); 6173 } 6174 EXPORT_SYMBOL_GPL(ftrace_set_filter_ips); 6175 6176 /** 6177 * ftrace_ops_set_global_filter - setup ops to use global filters 6178 * @ops: the ops which will use the global filters 6179 * 6180 * ftrace users who need global function trace filtering should call this. 6181 * It can set the global filter only if ops were not initialized before. 6182 */ 6183 void ftrace_ops_set_global_filter(struct ftrace_ops *ops) 6184 { 6185 if (ops->flags & FTRACE_OPS_FL_INITIALIZED) 6186 return; 6187 6188 ftrace_ops_init(ops); 6189 ops->func_hash = &global_ops.local_hash; 6190 } 6191 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter); 6192 6193 static int 6194 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 6195 int reset, int enable) 6196 { 6197 char *mod = NULL, *func, *command, *next = buf; 6198 char *tmp __free(kfree) = NULL; 6199 struct trace_array *tr = ops->private; 6200 int ret; 6201 6202 func = strsep(&next, ":"); 6203 6204 /* This can also handle :mod: parsing */ 6205 if (next) { 6206 if (!tr) 6207 return -EINVAL; 6208 6209 command = strsep(&next, ":"); 6210 if (strcmp(command, "mod") != 0) 6211 return -EINVAL; 6212 6213 mod = next; 6214 len = command - func; 6215 /* Save the original func as ftrace_set_hash() can modify it */ 6216 tmp = kstrdup(func, GFP_KERNEL); 6217 } 6218 6219 ret = ftrace_set_hash(ops, func, len, NULL, 0, 0, reset, enable, mod); 6220 6221 if (tr && mod && ret < 0) { 6222 /* Did tmp fail to allocate? */ 6223 if (!tmp) 6224 return -ENOMEM; 6225 ret = cache_mod(tr, tmp, mod, enable); 6226 } 6227 6228 return ret; 6229 } 6230 6231 /** 6232 * ftrace_set_filter - set a function to filter on in ftrace 6233 * @ops: the ops to set the filter with 6234 * @buf: the string that holds the function filter text. 6235 * @len: the length of the string. 6236 * @reset: non-zero to reset all filters before applying this filter. 6237 * 6238 * Filters denote which functions should be enabled when tracing is enabled. 6239 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 6240 * 6241 * This can allocate memory which must be freed before @ops can be freed, 6242 * either by removing each filtered addr or by using 6243 * ftrace_free_filter(@ops). 6244 */ 6245 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 6246 int len, int reset) 6247 { 6248 ftrace_ops_init(ops); 6249 return ftrace_set_regex(ops, buf, len, reset, 1); 6250 } 6251 EXPORT_SYMBOL_GPL(ftrace_set_filter); 6252 6253 /** 6254 * ftrace_set_notrace - set a function to not trace in ftrace 6255 * @ops: the ops to set the notrace filter with 6256 * @buf: the string that holds the function notrace text. 6257 * @len: the length of the string. 6258 * @reset: non-zero to reset all filters before applying this filter. 6259 * 6260 * Notrace Filters denote which functions should not be enabled when tracing 6261 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 6262 * for tracing. 6263 * 6264 * This can allocate memory which must be freed before @ops can be freed, 6265 * either by removing each filtered addr or by using 6266 * ftrace_free_filter(@ops). 
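 *
 * An illustrative (untested) sketch excluding the scheduler functions;
 * the buffer must be writable because parsing modifies it in place:
 *
 *	char buf[] = "sched_*";
 *
 *	ftrace_set_notrace(&ops, buf, strlen(buf), 1);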
6267 */ 6268 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 6269 int len, int reset) 6270 { 6271 ftrace_ops_init(ops); 6272 return ftrace_set_regex(ops, buf, len, reset, 0); 6273 } 6274 EXPORT_SYMBOL_GPL(ftrace_set_notrace); 6275 /** 6276 * ftrace_set_global_filter - set a function to filter on with global tracers 6277 * @buf: the string that holds the function filter text. 6278 * @len: the length of the string. 6279 * @reset: non-zero to reset all filters before applying this filter. 6280 * 6281 * Filters denote which functions should be enabled when tracing is enabled. 6282 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 6283 */ 6284 void ftrace_set_global_filter(unsigned char *buf, int len, int reset) 6285 { 6286 ftrace_set_regex(&global_ops, buf, len, reset, 1); 6287 } 6288 EXPORT_SYMBOL_GPL(ftrace_set_global_filter); 6289 6290 /** 6291 * ftrace_set_global_notrace - set a function to not trace with global tracers 6292 * @buf: the string that holds the function notrace text. 6293 * @len: the length of the string. 6294 * @reset: non-zero to reset all filters before applying this filter. 6295 * 6296 * Notrace Filters denote which functions should not be enabled when tracing 6297 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 6298 * for tracing. 6299 */ 6300 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) 6301 { 6302 ftrace_set_regex(&global_ops, buf, len, reset, 0); 6303 } 6304 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); 6305 6306 /* 6307 * command line interface to allow users to set filters on boot up. 6308 */ 6309 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 6310 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 6311 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 6312 6313 /* Used by function selftest to not test if filter is set */ 6314 bool ftrace_filter_param __initdata; 6315 6316 static int __init set_ftrace_notrace(char *str) 6317 { 6318 ftrace_filter_param = true; 6319 strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 6320 return 1; 6321 } 6322 __setup("ftrace_notrace=", set_ftrace_notrace); 6323 6324 static int __init set_ftrace_filter(char *str) 6325 { 6326 ftrace_filter_param = true; 6327 strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 6328 return 1; 6329 } 6330 __setup("ftrace_filter=", set_ftrace_filter); 6331 6332 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6333 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; 6334 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 6335 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); 6336 6337 static int __init set_graph_function(char *str) 6338 { 6339 strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 6340 return 1; 6341 } 6342 __setup("ftrace_graph_filter=", set_graph_function); 6343 6344 static int __init set_graph_notrace_function(char *str) 6345 { 6346 strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); 6347 return 1; 6348 } 6349 __setup("ftrace_graph_notrace=", set_graph_notrace_function); 6350 6351 static int __init set_graph_max_depth_function(char *str) 6352 { 6353 if (!str || kstrtouint(str, 0, &fgraph_max_depth)) 6354 return 0; 6355 return 1; 6356 } 6357 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); 6358 6359 static void __init set_ftrace_early_graph(char *buf, int enable) 6360 { 6361 int ret; 6362 char *func; 6363 struct ftrace_hash *hash; 6364 6365 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 6366 if 
(MEM_FAIL(!hash, "Failed to allocate hash\n")) 6367 return; 6368 6369 while (buf) { 6370 func = strsep(&buf, ","); 6371 /* we allow only one expression at a time */ 6372 ret = ftrace_graph_set_hash(hash, func); 6373 if (ret) 6374 printk(KERN_DEBUG "ftrace: function %s not " 6375 "traceable\n", func); 6376 } 6377 6378 if (enable) 6379 ftrace_graph_hash = hash; 6380 else 6381 ftrace_graph_notrace_hash = hash; 6382 } 6383 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6384 6385 void __init 6386 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) 6387 { 6388 char *func; 6389 6390 ftrace_ops_init(ops); 6391 6392 /* The trace_array is needed for caching module function filters */ 6393 if (!ops->private) { 6394 struct trace_array *tr = trace_get_global_array(); 6395 6396 ops->private = tr; 6397 ftrace_init_trace_array(tr); 6398 } 6399 6400 while (buf) { 6401 func = strsep(&buf, ","); 6402 ftrace_set_regex(ops, func, strlen(func), 0, enable); 6403 } 6404 } 6405 6406 static void __init set_ftrace_early_filters(void) 6407 { 6408 if (ftrace_filter_buf[0]) 6409 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); 6410 if (ftrace_notrace_buf[0]) 6411 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); 6412 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6413 if (ftrace_graph_buf[0]) 6414 set_ftrace_early_graph(ftrace_graph_buf, 1); 6415 if (ftrace_graph_notrace_buf[0]) 6416 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); 6417 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6418 } 6419 6420 int ftrace_regex_release(struct inode *inode, struct file *file) 6421 { 6422 struct seq_file *m = (struct seq_file *)file->private_data; 6423 struct ftrace_iterator *iter; 6424 struct ftrace_hash **orig_hash; 6425 struct trace_parser *parser; 6426 int filter_hash; 6427 6428 if (file->f_mode & FMODE_READ) { 6429 iter = m->private; 6430 seq_release(inode, file); 6431 } else 6432 iter = file->private_data; 6433 6434 parser = &iter->parser; 6435 if (trace_parser_loaded(parser)) { 6436 int enable = !(iter->flags & FTRACE_ITER_NOTRACE); 6437 6438 ftrace_process_regex(iter, parser->buffer, 6439 parser->idx, enable); 6440 } 6441 6442 trace_parser_put(parser); 6443 6444 mutex_lock(&iter->ops->func_hash->regex_lock); 6445 6446 if (file->f_mode & FMODE_WRITE) { 6447 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 6448 6449 if (filter_hash) { 6450 orig_hash = &iter->ops->func_hash->filter_hash; 6451 if (iter->tr) { 6452 if (list_empty(&iter->tr->mod_trace)) 6453 iter->hash->flags &= ~FTRACE_HASH_FL_MOD; 6454 else 6455 iter->hash->flags |= FTRACE_HASH_FL_MOD; 6456 } 6457 } else 6458 orig_hash = &iter->ops->func_hash->notrace_hash; 6459 6460 mutex_lock(&ftrace_lock); 6461 ftrace_hash_move_and_update_ops(iter->ops, orig_hash, 6462 iter->hash, filter_hash); 6463 mutex_unlock(&ftrace_lock); 6464 } else { 6465 /* For read only, the hash is the ops hash */ 6466 iter->hash = NULL; 6467 } 6468 6469 mutex_unlock(&iter->ops->func_hash->regex_lock); 6470 free_ftrace_hash(iter->hash); 6471 if (iter->tr) 6472 trace_array_put(iter->tr); 6473 kfree(iter); 6474 6475 return 0; 6476 } 6477 6478 static const struct file_operations ftrace_avail_fops = { 6479 .open = ftrace_avail_open, 6480 .read = seq_read, 6481 .llseek = seq_lseek, 6482 .release = seq_release_private, 6483 }; 6484 6485 static const struct file_operations ftrace_enabled_fops = { 6486 .open = ftrace_enabled_open, 6487 .read = seq_read, 6488 .llseek = seq_lseek, 6489 .release = seq_release_private, 6490 }; 6491 6492 static const struct file_operations 
ftrace_touched_fops = { 6493 .open = ftrace_touched_open, 6494 .read = seq_read, 6495 .llseek = seq_lseek, 6496 .release = seq_release_private, 6497 }; 6498 6499 static const struct file_operations ftrace_avail_addrs_fops = { 6500 .open = ftrace_avail_addrs_open, 6501 .read = seq_read, 6502 .llseek = seq_lseek, 6503 .release = seq_release_private, 6504 }; 6505 6506 static const struct file_operations ftrace_filter_fops = { 6507 .open = ftrace_filter_open, 6508 .read = seq_read, 6509 .write = ftrace_filter_write, 6510 .llseek = tracing_lseek, 6511 .release = ftrace_regex_release, 6512 }; 6513 6514 static const struct file_operations ftrace_notrace_fops = { 6515 .open = ftrace_notrace_open, 6516 .read = seq_read, 6517 .write = ftrace_notrace_write, 6518 .llseek = tracing_lseek, 6519 .release = ftrace_regex_release, 6520 }; 6521 6522 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6523 6524 static DEFINE_MUTEX(graph_lock); 6525 6526 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH; 6527 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH; 6528 6529 enum graph_filter_type { 6530 GRAPH_FILTER_NOTRACE = 0, 6531 GRAPH_FILTER_FUNCTION, 6532 }; 6533 6534 #define FTRACE_GRAPH_EMPTY ((void *)1) 6535 6536 struct ftrace_graph_data { 6537 struct ftrace_hash *hash; 6538 struct ftrace_func_entry *entry; 6539 int idx; /* for hash table iteration */ 6540 enum graph_filter_type type; 6541 struct ftrace_hash *new_hash; 6542 const struct seq_operations *seq_ops; 6543 struct trace_parser parser; 6544 }; 6545 6546 static void * 6547 __g_next(struct seq_file *m, loff_t *pos) 6548 { 6549 struct ftrace_graph_data *fgd = m->private; 6550 struct ftrace_func_entry *entry = fgd->entry; 6551 struct hlist_head *head; 6552 int i, idx = fgd->idx; 6553 6554 if (*pos >= fgd->hash->count) 6555 return NULL; 6556 6557 if (entry) { 6558 hlist_for_each_entry_continue(entry, hlist) { 6559 fgd->entry = entry; 6560 return entry; 6561 } 6562 6563 idx++; 6564 } 6565 6566 for (i = idx; i < 1 << fgd->hash->size_bits; i++) { 6567 head = &fgd->hash->buckets[i]; 6568 hlist_for_each_entry(entry, head, hlist) { 6569 fgd->entry = entry; 6570 fgd->idx = i; 6571 return entry; 6572 } 6573 } 6574 return NULL; 6575 } 6576 6577 static void * 6578 g_next(struct seq_file *m, void *v, loff_t *pos) 6579 { 6580 (*pos)++; 6581 return __g_next(m, pos); 6582 } 6583 6584 static void *g_start(struct seq_file *m, loff_t *pos) 6585 { 6586 struct ftrace_graph_data *fgd = m->private; 6587 6588 mutex_lock(&graph_lock); 6589 6590 if (fgd->type == GRAPH_FILTER_FUNCTION) 6591 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 6592 lockdep_is_held(&graph_lock)); 6593 else 6594 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 6595 lockdep_is_held(&graph_lock)); 6596 6597 /* Nothing, tell g_show to print all functions are enabled */ 6598 if (ftrace_hash_empty(fgd->hash) && !*pos) 6599 return FTRACE_GRAPH_EMPTY; 6600 6601 fgd->idx = 0; 6602 fgd->entry = NULL; 6603 return __g_next(m, pos); 6604 } 6605 6606 static void g_stop(struct seq_file *m, void *p) 6607 { 6608 mutex_unlock(&graph_lock); 6609 } 6610 6611 static int g_show(struct seq_file *m, void *v) 6612 { 6613 struct ftrace_func_entry *entry = v; 6614 6615 if (!entry) 6616 return 0; 6617 6618 if (entry == FTRACE_GRAPH_EMPTY) { 6619 struct ftrace_graph_data *fgd = m->private; 6620 6621 if (fgd->type == GRAPH_FILTER_FUNCTION) 6622 seq_puts(m, "#### all functions enabled ####\n"); 6623 else 6624 seq_puts(m, "#### no functions disabled ####\n"); 6625 return 0; 6626 } 6627 6628 
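/* Otherwise print the symbol that the stored address resolves to */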
seq_printf(m, "%ps\n", (void *)entry->ip); 6629 6630 return 0; 6631 } 6632 6633 static const struct seq_operations ftrace_graph_seq_ops = { 6634 .start = g_start, 6635 .next = g_next, 6636 .stop = g_stop, 6637 .show = g_show, 6638 }; 6639 6640 static int 6641 __ftrace_graph_open(struct inode *inode, struct file *file, 6642 struct ftrace_graph_data *fgd) 6643 { 6644 int ret; 6645 struct ftrace_hash *new_hash = NULL; 6646 6647 ret = security_locked_down(LOCKDOWN_TRACEFS); 6648 if (ret) 6649 return ret; 6650 6651 if (file->f_mode & FMODE_WRITE) { 6652 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 6653 6654 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) 6655 return -ENOMEM; 6656 6657 if (file->f_flags & O_TRUNC) 6658 new_hash = alloc_ftrace_hash(size_bits); 6659 else 6660 new_hash = alloc_and_copy_ftrace_hash(size_bits, 6661 fgd->hash); 6662 if (!new_hash) { 6663 ret = -ENOMEM; 6664 goto out; 6665 } 6666 } 6667 6668 if (file->f_mode & FMODE_READ) { 6669 ret = seq_open(file, &ftrace_graph_seq_ops); 6670 if (!ret) { 6671 struct seq_file *m = file->private_data; 6672 m->private = fgd; 6673 } else { 6674 /* Failed */ 6675 free_ftrace_hash(new_hash); 6676 new_hash = NULL; 6677 } 6678 } else 6679 file->private_data = fgd; 6680 6681 out: 6682 if (ret < 0 && file->f_mode & FMODE_WRITE) 6683 trace_parser_put(&fgd->parser); 6684 6685 fgd->new_hash = new_hash; 6686 6687 /* 6688 * All uses of fgd->hash must be taken with the graph_lock 6689 * held. The graph_lock is going to be released, so force 6690 * fgd->hash to be reinitialized when it is taken again. 6691 */ 6692 fgd->hash = NULL; 6693 6694 return ret; 6695 } 6696 6697 static int 6698 ftrace_graph_open(struct inode *inode, struct file *file) 6699 { 6700 struct ftrace_graph_data *fgd; 6701 int ret; 6702 6703 if (unlikely(ftrace_disabled)) 6704 return -ENODEV; 6705 6706 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 6707 if (fgd == NULL) 6708 return -ENOMEM; 6709 6710 mutex_lock(&graph_lock); 6711 6712 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 6713 lockdep_is_held(&graph_lock)); 6714 fgd->type = GRAPH_FILTER_FUNCTION; 6715 fgd->seq_ops = &ftrace_graph_seq_ops; 6716 6717 ret = __ftrace_graph_open(inode, file, fgd); 6718 if (ret < 0) 6719 kfree(fgd); 6720 6721 mutex_unlock(&graph_lock); 6722 return ret; 6723 } 6724 6725 static int 6726 ftrace_graph_notrace_open(struct inode *inode, struct file *file) 6727 { 6728 struct ftrace_graph_data *fgd; 6729 int ret; 6730 6731 if (unlikely(ftrace_disabled)) 6732 return -ENODEV; 6733 6734 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 6735 if (fgd == NULL) 6736 return -ENOMEM; 6737 6738 mutex_lock(&graph_lock); 6739 6740 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 6741 lockdep_is_held(&graph_lock)); 6742 fgd->type = GRAPH_FILTER_NOTRACE; 6743 fgd->seq_ops = &ftrace_graph_seq_ops; 6744 6745 ret = __ftrace_graph_open(inode, file, fgd); 6746 if (ret < 0) 6747 kfree(fgd); 6748 6749 mutex_unlock(&graph_lock); 6750 return ret; 6751 } 6752 6753 static int 6754 ftrace_graph_release(struct inode *inode, struct file *file) 6755 { 6756 struct ftrace_graph_data *fgd; 6757 struct ftrace_hash *old_hash, *new_hash; 6758 struct trace_parser *parser; 6759 int ret = 0; 6760 6761 if (file->f_mode & FMODE_READ) { 6762 struct seq_file *m = file->private_data; 6763 6764 fgd = m->private; 6765 seq_release(inode, file); 6766 } else { 6767 fgd = file->private_data; 6768 } 6769 6770 6771 if (file->f_mode & FMODE_WRITE) { 6772 6773 parser = &fgd->parser; 6774 6775 if (trace_parser_loaded((parser))) { 6776 
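/* Apply any expression still buffered in the parser */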
ret = ftrace_graph_set_hash(fgd->new_hash, 6777 parser->buffer); 6778 } 6779 6780 trace_parser_put(parser); 6781 6782 new_hash = __ftrace_hash_move(fgd->new_hash); 6783 if (!new_hash) { 6784 ret = -ENOMEM; 6785 goto out; 6786 } 6787 6788 mutex_lock(&graph_lock); 6789 6790 if (fgd->type == GRAPH_FILTER_FUNCTION) { 6791 old_hash = rcu_dereference_protected(ftrace_graph_hash, 6792 lockdep_is_held(&graph_lock)); 6793 rcu_assign_pointer(ftrace_graph_hash, new_hash); 6794 } else { 6795 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 6796 lockdep_is_held(&graph_lock)); 6797 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); 6798 } 6799 6800 mutex_unlock(&graph_lock); 6801 6802 /* 6803 * We need to do a hard force of sched synchronization. 6804 * This is because we use preempt_disable() to do RCU, but 6805 * the function tracers can be called where RCU is not watching 6806 * (like before user_exit()). We can not rely on the RCU 6807 * infrastructure to do the synchronization, thus we must do it 6808 * ourselves. 6809 */ 6810 if (old_hash != EMPTY_HASH) 6811 synchronize_rcu_tasks_rude(); 6812 6813 free_ftrace_hash(old_hash); 6814 } 6815 6816 out: 6817 free_ftrace_hash(fgd->new_hash); 6818 kfree(fgd); 6819 6820 return ret; 6821 } 6822 6823 static int 6824 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) 6825 { 6826 struct ftrace_glob func_g; 6827 struct dyn_ftrace *rec; 6828 struct ftrace_page *pg; 6829 struct ftrace_func_entry *entry; 6830 int fail = 1; 6831 int not; 6832 6833 /* decode regex */ 6834 func_g.type = filter_parse_regex(buffer, strlen(buffer), 6835 &func_g.search, ¬); 6836 6837 func_g.len = strlen(func_g.search); 6838 6839 guard(mutex)(&ftrace_lock); 6840 6841 if (unlikely(ftrace_disabled)) 6842 return -ENODEV; 6843 6844 do_for_each_ftrace_rec(pg, rec) { 6845 6846 if (rec->flags & FTRACE_FL_DISABLED) 6847 continue; 6848 6849 if (ftrace_match_record(rec, &func_g, NULL, 0)) { 6850 entry = ftrace_lookup_ip(hash, rec->ip); 6851 6852 if (!not) { 6853 fail = 0; 6854 6855 if (entry) 6856 continue; 6857 if (add_hash_entry(hash, rec->ip) == NULL) 6858 return 0; 6859 } else { 6860 if (entry) { 6861 free_hash_entry(hash, entry); 6862 fail = 0; 6863 } 6864 } 6865 } 6866 } while_for_each_ftrace_rec(); 6867 6868 return fail ? 
-EINVAL : 0; 6869 } 6870 6871 static ssize_t 6872 ftrace_graph_write(struct file *file, const char __user *ubuf, 6873 size_t cnt, loff_t *ppos) 6874 { 6875 ssize_t read, ret = 0; 6876 struct ftrace_graph_data *fgd = file->private_data; 6877 struct trace_parser *parser; 6878 6879 if (!cnt) 6880 return 0; 6881 6882 /* Read mode uses seq functions */ 6883 if (file->f_mode & FMODE_READ) { 6884 struct seq_file *m = file->private_data; 6885 fgd = m->private; 6886 } 6887 6888 parser = &fgd->parser; 6889 6890 read = trace_get_user(parser, ubuf, cnt, ppos); 6891 6892 if (read >= 0 && trace_parser_loaded(parser) && 6893 !trace_parser_cont(parser)) { 6894 6895 ret = ftrace_graph_set_hash(fgd->new_hash, 6896 parser->buffer); 6897 trace_parser_clear(parser); 6898 } 6899 6900 if (!ret) 6901 ret = read; 6902 6903 return ret; 6904 } 6905 6906 static const struct file_operations ftrace_graph_fops = { 6907 .open = ftrace_graph_open, 6908 .read = seq_read, 6909 .write = ftrace_graph_write, 6910 .llseek = tracing_lseek, 6911 .release = ftrace_graph_release, 6912 }; 6913 6914 static const struct file_operations ftrace_graph_notrace_fops = { 6915 .open = ftrace_graph_notrace_open, 6916 .read = seq_read, 6917 .write = ftrace_graph_write, 6918 .llseek = tracing_lseek, 6919 .release = ftrace_graph_release, 6920 }; 6921 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6922 6923 void ftrace_create_filter_files(struct ftrace_ops *ops, 6924 struct dentry *parent) 6925 { 6926 6927 trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent, 6928 ops, &ftrace_filter_fops); 6929 6930 trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent, 6931 ops, &ftrace_notrace_fops); 6932 } 6933 6934 /* 6935 * The name "destroy_filter_files" is really a misnomer. Although 6936 * it may actually delete the files in the future, it is currently 6937 * intended to make sure the ops passed in are disabled 6938 * and that when this function returns, the caller is free to 6939 * free the ops. 6940 * 6941 * The "destroy" name is only to match the "create" name that this 6942 * should be paired with.
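 *
 * A typical teardown sketch (illustrative): the caller that earlier used
 * ftrace_create_filter_files(ops, parent) calls
 * ftrace_destroy_filter_files(ops) on the same ops, and only then
 * frees or otherwise releases the ops.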
6943 */ 6944 void ftrace_destroy_filter_files(struct ftrace_ops *ops) 6945 { 6946 mutex_lock(&ftrace_lock); 6947 if (ops->flags & FTRACE_OPS_FL_ENABLED) 6948 ftrace_shutdown(ops, 0); 6949 ops->flags |= FTRACE_OPS_FL_DELETED; 6950 ftrace_free_filter(ops); 6951 mutex_unlock(&ftrace_lock); 6952 } 6953 6954 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) 6955 { 6956 6957 trace_create_file("available_filter_functions", TRACE_MODE_READ, 6958 d_tracer, NULL, &ftrace_avail_fops); 6959 6960 trace_create_file("available_filter_functions_addrs", TRACE_MODE_READ, 6961 d_tracer, NULL, &ftrace_avail_addrs_fops); 6962 6963 trace_create_file("enabled_functions", TRACE_MODE_READ, 6964 d_tracer, NULL, &ftrace_enabled_fops); 6965 6966 trace_create_file("touched_functions", TRACE_MODE_READ, 6967 d_tracer, NULL, &ftrace_touched_fops); 6968 6969 ftrace_create_filter_files(&global_ops, d_tracer); 6970 6971 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6972 trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer, 6973 NULL, 6974 &ftrace_graph_fops); 6975 trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer, 6976 NULL, 6977 &ftrace_graph_notrace_fops); 6978 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6979 6980 return 0; 6981 } 6982 6983 static int ftrace_cmp_ips(const void *a, const void *b) 6984 { 6985 const unsigned long *ipa = a; 6986 const unsigned long *ipb = b; 6987 6988 if (*ipa > *ipb) 6989 return 1; 6990 if (*ipa < *ipb) 6991 return -1; 6992 return 0; 6993 } 6994 6995 #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST 6996 static void test_is_sorted(unsigned long *start, unsigned long count) 6997 { 6998 int i; 6999 7000 for (i = 1; i < count; i++) { 7001 if (WARN(start[i - 1] > start[i], 7002 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i, 7003 (void *)start[i - 1], start[i - 1], 7004 (void *)start[i], start[i])) 7005 break; 7006 } 7007 if (i == count) 7008 pr_info("ftrace section at %px sorted properly\n", start); 7009 } 7010 #else 7011 static void test_is_sorted(unsigned long *start, unsigned long count) 7012 { 7013 } 7014 #endif 7015 7016 static int ftrace_process_locs(struct module *mod, 7017 unsigned long *start, 7018 unsigned long *end) 7019 { 7020 struct ftrace_page *pg_unuse = NULL; 7021 struct ftrace_page *start_pg; 7022 struct ftrace_page *pg; 7023 struct dyn_ftrace *rec; 7024 unsigned long skipped = 0; 7025 unsigned long count; 7026 unsigned long *p; 7027 unsigned long addr; 7028 unsigned long flags = 0; /* Shut up gcc */ 7029 int ret = -ENOMEM; 7030 7031 count = end - start; 7032 7033 if (!count) 7034 return 0; 7035 7036 /* 7037 * Sorting mcount in vmlinux at build time depends on 7038 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount_loc in 7039 * modules cannot be sorted at build time. 7040 */ 7041 if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) { 7042 sort(start, count, sizeof(*start), 7043 ftrace_cmp_ips, NULL); 7044 } else { 7045 test_is_sorted(start, count); 7046 } 7047 7048 start_pg = ftrace_allocate_pages(count); 7049 if (!start_pg) 7050 return -ENOMEM; 7051 7052 mutex_lock(&ftrace_lock); 7053 7054 /* 7055 * The core kernel and each module need their own pages, as 7056 * modules will free them when they are removed. 7057 * Force a new page to be allocated for modules. 7058 */ 7059 if (!mod) { 7060 WARN_ON(ftrace_pages || ftrace_pages_start); 7061 /* First initialization */ 7062 ftrace_pages = ftrace_pages_start = start_pg; 7063 } else { 7064 if (!ftrace_pages) 7065 goto out; 7066 7067 if (WARN_ON(ftrace_pages->next)) { 7068 /* Hmm, we have free pages?
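 * If so, a previous module load left pages behind; walk to the true
 * end of the list so the new pages are appended there.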
*/ 7069 while (ftrace_pages->next) 7070 ftrace_pages = ftrace_pages->next; 7071 } 7072 7073 ftrace_pages->next = start_pg; 7074 } 7075 7076 p = start; 7077 pg = start_pg; 7078 while (p < end) { 7079 unsigned long end_offset; 7080 addr = ftrace_call_adjust(*p++); 7081 /* 7082 * Some architecture linkers will pad between 7083 * the different mcount_loc sections of different 7084 * object files to satisfy alignments. 7085 * Skip any NULL pointers. 7086 */ 7087 if (!addr) { 7088 skipped++; 7089 continue; 7090 } 7091 7092 end_offset = (pg->index+1) * sizeof(pg->records[0]); 7093 if (end_offset > PAGE_SIZE << pg->order) { 7094 /* We should have allocated enough */ 7095 if (WARN_ON(!pg->next)) 7096 break; 7097 pg = pg->next; 7098 } 7099 7100 rec = &pg->records[pg->index++]; 7101 rec->ip = addr; 7102 } 7103 7104 if (pg->next) { 7105 pg_unuse = pg->next; 7106 pg->next = NULL; 7107 } 7108 7109 /* Assign the last page to ftrace_pages */ 7110 ftrace_pages = pg; 7111 7112 /* 7113 * We only need to disable interrupts on start up 7114 * because we are modifying code that an interrupt 7115 * may execute, and the modification is not atomic. 7116 * But for modules, nothing runs the code we modify 7117 * until we are finished with it, and there's no 7118 * reason to cause large interrupt latencies while we do it. 7119 */ 7120 if (!mod) 7121 local_irq_save(flags); 7122 ftrace_update_code(mod, start_pg); 7123 if (!mod) 7124 local_irq_restore(flags); 7125 ret = 0; 7126 out: 7127 mutex_unlock(&ftrace_lock); 7128 7129 /* We should have used all pages unless we skipped some */ 7130 if (pg_unuse) { 7131 WARN_ON(!skipped); 7132 /* Need to synchronize with ftrace_location_range() */ 7133 synchronize_rcu(); 7134 ftrace_free_pages(pg_unuse); 7135 } 7136 return ret; 7137 } 7138 7139 struct ftrace_mod_func { 7140 struct list_head list; 7141 char *name; 7142 unsigned long ip; 7143 unsigned int size; 7144 }; 7145 7146 struct ftrace_mod_map { 7147 struct rcu_head rcu; 7148 struct list_head list; 7149 struct module *mod; 7150 unsigned long start_addr; 7151 unsigned long end_addr; 7152 struct list_head funcs; 7153 unsigned int num_funcs; 7154 }; 7155 7156 static int ftrace_get_trampoline_kallsym(unsigned int symnum, 7157 unsigned long *value, char *type, 7158 char *name, char *module_name, 7159 int *exported) 7160 { 7161 struct ftrace_ops *op; 7162 7163 list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) { 7164 if (!op->trampoline || symnum--) 7165 continue; 7166 *value = op->trampoline; 7167 *type = 't'; 7168 strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN); 7169 strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN); 7170 *exported = 0; 7171 return 0; 7172 } 7173 7174 return -ERANGE; 7175 } 7176 7177 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES) 7178 /* 7179 * Check if the current ops references the given ip. 7180 * 7181 * If the ops traces all functions, then it was already accounted for. 7182 * If the ops does not trace the current record function, skip it. 7183 * If the ops ignores the function via notrace filter, skip it. 
7184 */ 7185 static bool 7186 ops_references_ip(struct ftrace_ops *ops, unsigned long ip) 7187 { 7188 /* If ops isn't enabled, ignore it */ 7189 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 7190 return false; 7191 7192 /* If ops traces all then it includes this function */ 7193 if (ops_traces_mod(ops)) 7194 return true; 7195 7196 /* The function must be in the filter */ 7197 if (!ftrace_hash_empty(ops->func_hash->filter_hash) && 7198 !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip)) 7199 return false; 7200 7201 /* If in notrace hash, we ignore it too */ 7202 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip)) 7203 return false; 7204 7205 return true; 7206 } 7207 #endif 7208 7209 #ifdef CONFIG_MODULES 7210 7211 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) 7212 7213 static LIST_HEAD(ftrace_mod_maps); 7214 7215 static int referenced_filters(struct dyn_ftrace *rec) 7216 { 7217 struct ftrace_ops *ops; 7218 int cnt = 0; 7219 7220 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { 7221 if (ops_references_ip(ops, rec->ip)) { 7222 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) 7223 continue; 7224 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) 7225 continue; 7226 cnt++; 7227 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) 7228 rec->flags |= FTRACE_FL_REGS; 7229 if (cnt == 1 && ops->trampoline) 7230 rec->flags |= FTRACE_FL_TRAMP; 7231 else 7232 rec->flags &= ~FTRACE_FL_TRAMP; 7233 } 7234 } 7235 7236 return cnt; 7237 } 7238 7239 static void 7240 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) 7241 { 7242 struct ftrace_func_entry *entry; 7243 struct dyn_ftrace *rec; 7244 int i; 7245 7246 if (ftrace_hash_empty(hash)) 7247 return; 7248 7249 for (i = 0; i < pg->index; i++) { 7250 rec = &pg->records[i]; 7251 entry = __ftrace_lookup_ip(hash, rec->ip); 7252 /* 7253 * Do not allow this rec to match again. 7254 * Yeah, it may waste some memory, but will be removed 7255 * if/when the hash is modified again. 
7256 */ 7257 if (entry) 7258 entry->ip = 0; 7259 } 7260 } 7261 7262 /* Clear any records from hashes */ 7263 static void clear_mod_from_hashes(struct ftrace_page *pg) 7264 { 7265 struct trace_array *tr; 7266 7267 mutex_lock(&trace_types_lock); 7268 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 7269 if (!tr->ops || !tr->ops->func_hash) 7270 continue; 7271 mutex_lock(&tr->ops->func_hash->regex_lock); 7272 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); 7273 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); 7274 mutex_unlock(&tr->ops->func_hash->regex_lock); 7275 } 7276 mutex_unlock(&trace_types_lock); 7277 } 7278 7279 static void ftrace_free_mod_map(struct rcu_head *rcu) 7280 { 7281 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); 7282 struct ftrace_mod_func *mod_func; 7283 struct ftrace_mod_func *n; 7284 7285 /* All the contents of mod_map are now not visible to readers */ 7286 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { 7287 kfree(mod_func->name); 7288 list_del(&mod_func->list); 7289 kfree(mod_func); 7290 } 7291 7292 kfree(mod_map); 7293 } 7294 7295 void ftrace_release_mod(struct module *mod) 7296 { 7297 struct ftrace_mod_map *mod_map; 7298 struct ftrace_mod_map *n; 7299 struct dyn_ftrace *rec; 7300 struct ftrace_page **last_pg; 7301 struct ftrace_page *tmp_page = NULL; 7302 struct ftrace_page *pg; 7303 7304 mutex_lock(&ftrace_lock); 7305 7306 if (ftrace_disabled) 7307 goto out_unlock; 7308 7309 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { 7310 if (mod_map->mod == mod) { 7311 list_del_rcu(&mod_map->list); 7312 call_rcu(&mod_map->rcu, ftrace_free_mod_map); 7313 break; 7314 } 7315 } 7316 7317 /* 7318 * Each module has its own ftrace_pages, remove 7319 * them from the list. 7320 */ 7321 last_pg = &ftrace_pages_start; 7322 for (pg = ftrace_pages_start; pg; pg = *last_pg) { 7323 rec = &pg->records[0]; 7324 if (within_module(rec->ip, mod)) { 7325 /* 7326 * As core pages are first, the first 7327 * page should never be a module page. 7328 */ 7329 if (WARN_ON(pg == ftrace_pages_start)) 7330 goto out_unlock; 7331 7332 /* Check if we are deleting the last page */ 7333 if (pg == ftrace_pages) 7334 ftrace_pages = next_to_ftrace_page(last_pg); 7335 7336 ftrace_update_tot_cnt -= pg->index; 7337 *last_pg = pg->next; 7338 7339 pg->next = tmp_page; 7340 tmp_page = pg; 7341 } else 7342 last_pg = &pg->next; 7343 } 7344 out_unlock: 7345 mutex_unlock(&ftrace_lock); 7346 7347 /* Need to synchronize with ftrace_location_range() */ 7348 if (tmp_page) 7349 synchronize_rcu(); 7350 for (pg = tmp_page; pg; pg = tmp_page) { 7351 7352 /* Needs to be called outside of ftrace_lock */ 7353 clear_mod_from_hashes(pg); 7354 7355 if (pg->records) { 7356 free_pages((unsigned long)pg->records, pg->order); 7357 ftrace_number_of_pages -= 1 << pg->order; 7358 } 7359 tmp_page = pg->next; 7360 kfree(pg); 7361 ftrace_number_of_groups--; 7362 } 7363 } 7364 7365 void ftrace_module_enable(struct module *mod) 7366 { 7367 struct dyn_ftrace *rec; 7368 struct ftrace_page *pg; 7369 7370 mutex_lock(&ftrace_lock); 7371 7372 if (ftrace_disabled) 7373 goto out_unlock; 7374 7375 /* 7376 * If tracing is enabled, go ahead and enable the record. 7377 * 7378 * The reason not to enable the record immediately is the 7379 * inherent check of ftrace_make_nop/ftrace_make_call for 7380 * correct previous instructions. Doing the NOP conversion 7381 * first puts the module into the correct state, thus 7382 * passing the ftrace_make_call check.
7383 * 7384 * We also delay this to after the module code already set the 7385 * text to read-only, as we now need to set it back to read-write 7386 * so that we can modify the text. 7387 */ 7388 if (ftrace_start_up) 7389 ftrace_arch_code_modify_prepare(); 7390 7391 do_for_each_ftrace_rec(pg, rec) { 7392 int cnt; 7393 /* 7394 * do_for_each_ftrace_rec() is a double loop. 7395 * module text shares the pg. If a record is 7396 * not part of this module, then skip this pg, 7397 * which the "break" will do. 7398 */ 7399 if (!within_module(rec->ip, mod)) 7400 break; 7401 7402 /* Weak functions should still be ignored */ 7403 if (!test_for_valid_rec(rec)) { 7404 /* Clear all other flags. Should not be enabled anyway */ 7405 rec->flags = FTRACE_FL_DISABLED; 7406 continue; 7407 } 7408 7409 cnt = 0; 7410 7411 /* 7412 * When adding a module, we need to check if tracers are 7413 * currently enabled and if they are, and can trace this record, 7414 * we need to enable the module functions as well as update the 7415 * reference counts for those function records. 7416 */ 7417 if (ftrace_start_up) 7418 cnt += referenced_filters(rec); 7419 7420 rec->flags &= ~FTRACE_FL_DISABLED; 7421 rec->flags += cnt; 7422 7423 if (ftrace_start_up && cnt) { 7424 int failed = __ftrace_replace_code(rec, 1); 7425 if (failed) { 7426 ftrace_bug(failed, rec); 7427 goto out_loop; 7428 } 7429 } 7430 7431 } while_for_each_ftrace_rec(); 7432 7433 out_loop: 7434 if (ftrace_start_up) 7435 ftrace_arch_code_modify_post_process(); 7436 7437 out_unlock: 7438 mutex_unlock(&ftrace_lock); 7439 7440 process_cached_mods(mod->name); 7441 } 7442 7443 void ftrace_module_init(struct module *mod) 7444 { 7445 int ret; 7446 7447 if (ftrace_disabled || !mod->num_ftrace_callsites) 7448 return; 7449 7450 ret = ftrace_process_locs(mod, mod->ftrace_callsites, 7451 mod->ftrace_callsites + mod->num_ftrace_callsites); 7452 if (ret) 7453 pr_warn("ftrace: failed to allocate entries for module '%s' functions\n", 7454 mod->name); 7455 } 7456 7457 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 7458 struct dyn_ftrace *rec) 7459 { 7460 struct ftrace_mod_func *mod_func; 7461 unsigned long symsize; 7462 unsigned long offset; 7463 char str[KSYM_SYMBOL_LEN]; 7464 char *modname; 7465 const char *ret; 7466 7467 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); 7468 if (!ret) 7469 return; 7470 7471 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL); 7472 if (!mod_func) 7473 return; 7474 7475 mod_func->name = kstrdup(str, GFP_KERNEL); 7476 if (!mod_func->name) { 7477 kfree(mod_func); 7478 return; 7479 } 7480 7481 mod_func->ip = rec->ip - offset; 7482 mod_func->size = symsize; 7483 7484 mod_map->num_funcs++; 7485 7486 list_add_rcu(&mod_func->list, &mod_map->funcs); 7487 } 7488 7489 static struct ftrace_mod_map * 7490 allocate_ftrace_mod_map(struct module *mod, 7491 unsigned long start, unsigned long end) 7492 { 7493 struct ftrace_mod_map *mod_map; 7494 7495 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL); 7496 if (!mod_map) 7497 return NULL; 7498 7499 mod_map->mod = mod; 7500 mod_map->start_addr = start; 7501 mod_map->end_addr = end; 7502 mod_map->num_funcs = 0; 7503 7504 INIT_LIST_HEAD_RCU(&mod_map->funcs); 7505 7506 list_add_rcu(&mod_map->list, &ftrace_mod_maps); 7507 7508 return mod_map; 7509 } 7510 7511 static int 7512 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, 7513 unsigned long addr, unsigned long *size, 7514 unsigned long *off, char *sym) 7515 { 7516 struct ftrace_mod_func *found_func = NULL; 7517 struct ftrace_mod_func 
*mod_func; 7518 7519 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 7520 if (addr >= mod_func->ip && 7521 addr < mod_func->ip + mod_func->size) { 7522 found_func = mod_func; 7523 break; 7524 } 7525 } 7526 7527 if (found_func) { 7528 if (size) 7529 *size = found_func->size; 7530 if (off) 7531 *off = addr - found_func->ip; 7532 return strscpy(sym, found_func->name, KSYM_NAME_LEN); 7533 } 7534 7535 return 0; 7536 } 7537 7538 int 7539 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, 7540 unsigned long *off, char **modname, char *sym) 7541 { 7542 struct ftrace_mod_map *mod_map; 7543 int ret = 0; 7544 7545 /* mod_map is freed via call_rcu() */ 7546 preempt_disable(); 7547 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 7548 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); 7549 if (ret) { 7550 if (modname) 7551 *modname = mod_map->mod->name; 7552 break; 7553 } 7554 } 7555 preempt_enable(); 7556 7557 return ret; 7558 } 7559 7560 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, 7561 char *type, char *name, 7562 char *module_name, int *exported) 7563 { 7564 struct ftrace_mod_map *mod_map; 7565 struct ftrace_mod_func *mod_func; 7566 int ret; 7567 7568 preempt_disable(); 7569 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 7570 7571 if (symnum >= mod_map->num_funcs) { 7572 symnum -= mod_map->num_funcs; 7573 continue; 7574 } 7575 7576 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 7577 if (symnum > 1) { 7578 symnum--; 7579 continue; 7580 } 7581 7582 *value = mod_func->ip; 7583 *type = 'T'; 7584 strscpy(name, mod_func->name, KSYM_NAME_LEN); 7585 strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); 7586 *exported = 1; 7587 preempt_enable(); 7588 return 0; 7589 } 7590 WARN_ON(1); 7591 break; 7592 } 7593 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, 7594 module_name, exported); 7595 preempt_enable(); 7596 return ret; 7597 } 7598 7599 #else 7600 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 7601 struct dyn_ftrace *rec) { } 7602 static inline struct ftrace_mod_map * 7603 allocate_ftrace_mod_map(struct module *mod, 7604 unsigned long start, unsigned long end) 7605 { 7606 return NULL; 7607 } 7608 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, 7609 char *type, char *name, char *module_name, 7610 int *exported) 7611 { 7612 int ret; 7613 7614 preempt_disable(); 7615 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, 7616 module_name, exported); 7617 preempt_enable(); 7618 return ret; 7619 } 7620 #endif /* CONFIG_MODULES */ 7621 7622 struct ftrace_init_func { 7623 struct list_head list; 7624 unsigned long ip; 7625 }; 7626 7627 /* Clear any init ips from hashes */ 7628 static void 7629 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) 7630 { 7631 struct ftrace_func_entry *entry; 7632 7633 entry = ftrace_lookup_ip(hash, func->ip); 7634 /* 7635 * Do not allow this rec to match again. 7636 * Yeah, it may waste some memory, but will be removed 7637 * if/when the hash is modified again. 
7638 */ 7639 if (entry) 7640 entry->ip = 0; 7641 } 7642 7643 static void 7644 clear_func_from_hashes(struct ftrace_init_func *func) 7645 { 7646 struct trace_array *tr; 7647 7648 mutex_lock(&trace_types_lock); 7649 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 7650 if (!tr->ops || !tr->ops->func_hash) 7651 continue; 7652 mutex_lock(&tr->ops->func_hash->regex_lock); 7653 clear_func_from_hash(func, tr->ops->func_hash->filter_hash); 7654 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); 7655 mutex_unlock(&tr->ops->func_hash->regex_lock); 7656 } 7657 mutex_unlock(&trace_types_lock); 7658 } 7659 7660 static void add_to_clear_hash_list(struct list_head *clear_list, 7661 struct dyn_ftrace *rec) 7662 { 7663 struct ftrace_init_func *func; 7664 7665 func = kmalloc(sizeof(*func), GFP_KERNEL); 7666 if (!func) { 7667 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); 7668 return; 7669 } 7670 7671 func->ip = rec->ip; 7672 list_add(&func->list, clear_list); 7673 } 7674 7675 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) 7676 { 7677 unsigned long start = (unsigned long)(start_ptr); 7678 unsigned long end = (unsigned long)(end_ptr); 7679 struct ftrace_page **last_pg = &ftrace_pages_start; 7680 struct ftrace_page *tmp_page = NULL; 7681 struct ftrace_page *pg; 7682 struct dyn_ftrace *rec; 7683 struct dyn_ftrace key; 7684 struct ftrace_mod_map *mod_map = NULL; 7685 struct ftrace_init_func *func, *func_next; 7686 LIST_HEAD(clear_hash); 7687 7688 key.ip = start; 7689 key.flags = end; /* overload flags, as it is unsigned long */ 7690 7691 mutex_lock(&ftrace_lock); 7692 7693 /* 7694 * If we are freeing module init memory, then check if 7695 * any tracer is active. If so, we need to save a mapping of 7696 * the module functions being freed with the address. 
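 * The saved ftrace_mod_map is what lets ftrace_mod_address_lookup() and
 * ftrace_mod_get_kallsym() still resolve symbols in the freed text.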
7697 */ 7698 if (mod && ftrace_ops_list != &ftrace_list_end) 7699 mod_map = allocate_ftrace_mod_map(mod, start, end); 7700 7701 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { 7702 if (end < pg->records[0].ip || 7703 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) 7704 continue; 7705 again: 7706 rec = bsearch(&key, pg->records, pg->index, 7707 sizeof(struct dyn_ftrace), 7708 ftrace_cmp_recs); 7709 if (!rec) 7710 continue; 7711 7712 /* rec will be cleared from hashes after ftrace_lock unlock */ 7713 add_to_clear_hash_list(&clear_hash, rec); 7714 7715 if (mod_map) 7716 save_ftrace_mod_rec(mod_map, rec); 7717 7718 pg->index--; 7719 ftrace_update_tot_cnt--; 7720 if (!pg->index) { 7721 *last_pg = pg->next; 7722 pg->next = tmp_page; 7723 tmp_page = pg; 7724 pg = container_of(last_pg, struct ftrace_page, next); 7725 if (!(*last_pg)) 7726 ftrace_pages = pg; 7727 continue; 7728 } 7729 memmove(rec, rec + 1, 7730 (pg->index - (rec - pg->records)) * sizeof(*rec)); 7731 /* More than one function may be in this block */ 7732 goto again; 7733 } 7734 mutex_unlock(&ftrace_lock); 7735 7736 list_for_each_entry_safe(func, func_next, &clear_hash, list) { 7737 clear_func_from_hashes(func); 7738 kfree(func); 7739 } 7740 /* Need to synchronize with ftrace_location_range() */ 7741 if (tmp_page) { 7742 synchronize_rcu(); 7743 ftrace_free_pages(tmp_page); 7744 } 7745 } 7746 7747 void __init ftrace_free_init_mem(void) 7748 { 7749 void *start = (void *)(&__init_begin); 7750 void *end = (void *)(&__init_end); 7751 7752 ftrace_boot_snapshot(); 7753 7754 ftrace_free_mem(NULL, start, end); 7755 } 7756 7757 int __init __weak ftrace_dyn_arch_init(void) 7758 { 7759 return 0; 7760 } 7761 7762 void __init ftrace_init(void) 7763 { 7764 extern unsigned long __start_mcount_loc[]; 7765 extern unsigned long __stop_mcount_loc[]; 7766 unsigned long count, flags; 7767 int ret; 7768 7769 local_irq_save(flags); 7770 ret = ftrace_dyn_arch_init(); 7771 local_irq_restore(flags); 7772 if (ret) 7773 goto failed; 7774 7775 count = __stop_mcount_loc - __start_mcount_loc; 7776 if (!count) { 7777 pr_info("ftrace: No functions to be traced?\n"); 7778 goto failed; 7779 } 7780 7781 pr_info("ftrace: allocating %ld entries in %ld pages\n", 7782 count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); 7783 7784 ret = ftrace_process_locs(NULL, 7785 __start_mcount_loc, 7786 __stop_mcount_loc); 7787 if (ret) { 7788 pr_warn("ftrace: failed to allocate entries for functions\n"); 7789 goto failed; 7790 } 7791 7792 pr_info("ftrace: allocated %ld pages with %ld groups\n", 7793 ftrace_number_of_pages, ftrace_number_of_groups); 7794 7795 last_ftrace_enabled = ftrace_enabled = 1; 7796 7797 set_ftrace_early_filters(); 7798 7799 return; 7800 failed: 7801 ftrace_disabled = 1; 7802 } 7803 7804 /* Do nothing if arch does not support this */ 7805 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) 7806 { 7807 } 7808 7809 static void ftrace_update_trampoline(struct ftrace_ops *ops) 7810 { 7811 unsigned long trampoline = ops->trampoline; 7812 7813 arch_ftrace_update_trampoline(ops); 7814 if (ops->trampoline && ops->trampoline != trampoline && 7815 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { 7816 /* Add to kallsyms before the perf events */ 7817 ftrace_add_trampoline_to_kallsyms(ops); 7818 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, 7819 ops->trampoline, ops->trampoline_size, false, 7820 FTRACE_TRAMPOLINE_SYM); 7821 /* 7822 * Record the perf text poke event after the ksymbol register 7823 * event. 
7824 */ 7825 perf_event_text_poke((void *)ops->trampoline, NULL, 0, 7826 (void *)ops->trampoline, 7827 ops->trampoline_size); 7828 } 7829 } 7830 7831 void ftrace_init_trace_array(struct trace_array *tr) 7832 { 7833 if (tr->flags & TRACE_ARRAY_FL_MOD_INIT) 7834 return; 7835 7836 INIT_LIST_HEAD(&tr->func_probes); 7837 INIT_LIST_HEAD(&tr->mod_trace); 7838 INIT_LIST_HEAD(&tr->mod_notrace); 7839 7840 tr->flags |= TRACE_ARRAY_FL_MOD_INIT; 7841 } 7842 #else 7843 7844 struct ftrace_ops global_ops = { 7845 .func = ftrace_stub, 7846 .flags = FTRACE_OPS_FL_INITIALIZED | 7847 FTRACE_OPS_FL_PID, 7848 }; 7849 7850 static int __init ftrace_nodyn_init(void) 7851 { 7852 ftrace_enabled = 1; 7853 return 0; 7854 } 7855 core_initcall(ftrace_nodyn_init); 7856 7857 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } 7858 static inline void ftrace_startup_all(int command) { } 7859 7860 static void ftrace_update_trampoline(struct ftrace_ops *ops) 7861 { 7862 } 7863 7864 #endif /* CONFIG_DYNAMIC_FTRACE */ 7865 7866 __init void ftrace_init_global_array_ops(struct trace_array *tr) 7867 { 7868 tr->ops = &global_ops; 7869 if (!global_ops.private) 7870 global_ops.private = tr; 7871 ftrace_init_trace_array(tr); 7872 init_array_fgraph_ops(tr, tr->ops); 7873 } 7874 7875 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) 7876 { 7877 /* If we filter on pids, update to use the pid function */ 7878 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { 7879 if (WARN_ON(tr->ops->func != ftrace_stub)) 7880 printk("ftrace ops had %pS for function\n", 7881 tr->ops->func); 7882 } 7883 tr->ops->func = func; 7884 tr->ops->private = tr; 7885 } 7886 7887 void ftrace_reset_array_ops(struct trace_array *tr) 7888 { 7889 tr->ops->func = ftrace_stub; 7890 } 7891 7892 static nokprobe_inline void 7893 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 7894 struct ftrace_ops *ignored, struct ftrace_regs *fregs) 7895 { 7896 struct pt_regs *regs = ftrace_get_regs(fregs); 7897 struct ftrace_ops *op; 7898 int bit; 7899 7900 /* 7901 * The ftrace_test_and_set_recursion() will disable preemption, 7902 * which is required since some of the ops may be dynamically 7903 * allocated, they must be freed after a synchronize_rcu(). 7904 */ 7905 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START); 7906 if (bit < 0) 7907 return; 7908 7909 do_for_each_ftrace_op(op, ftrace_ops_list) { 7910 /* Stub functions don't need to be called nor tested */ 7911 if (op->flags & FTRACE_OPS_FL_STUB) 7912 continue; 7913 /* 7914 * Check the following for each ops before calling their func: 7915 * if RCU flag is set, then rcu_is_watching() must be true 7916 * Otherwise test if the ip matches the ops filter 7917 * 7918 * If any of the above fails then the op->func() is not executed. 7919 */ 7920 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && 7921 ftrace_ops_test(op, ip, regs)) { 7922 if (FTRACE_WARN_ON(!op->func)) { 7923 pr_warn("op=%p %pS\n", op, op); 7924 goto out; 7925 } 7926 op->func(ip, parent_ip, op, fregs); 7927 } 7928 } while_for_each_ftrace_op(op); 7929 out: 7930 trace_clear_recursion(bit); 7931 } 7932 7933 /* 7934 * Some archs only support passing ip and parent_ip. Even though 7935 * the list function ignores the op parameter, we do not want any 7936 * C side effects, where a function is called without the caller 7937 * sending a third parameter. 7938 * Archs are to support both the regs and ftrace_ops at the same time. 7939 * If they support ftrace_ops, it is assumed they support regs. 
7940 * If callbacks want to use regs, they must either check for regs 7941 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. 7942 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. 7943 * An architecture can pass partial regs with ftrace_ops and still 7944 * set the ARCH_SUPPORTS_FTRACE_OPS. 7945 * 7946 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be 7947 * arch_ftrace_ops_list_func. 7948 */ 7949 #if ARCH_SUPPORTS_FTRACE_OPS 7950 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 7951 struct ftrace_ops *op, struct ftrace_regs *fregs) 7952 { 7953 kmsan_unpoison_memory(fregs, ftrace_regs_size()); 7954 __ftrace_ops_list_func(ip, parent_ip, NULL, fregs); 7955 } 7956 #else 7957 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) 7958 { 7959 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); 7960 } 7961 #endif 7962 NOKPROBE_SYMBOL(arch_ftrace_ops_list_func); 7963 7964 /* 7965 * If there's only one function registered, but it does not support 7966 * recursion or needs RCU protection, then this function will be called 7967 * by the mcount trampoline. 7968 */ 7969 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, 7970 struct ftrace_ops *op, struct ftrace_regs *fregs) 7971 { 7972 int bit; 7973 7974 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START); 7975 if (bit < 0) 7976 return; 7977 7978 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) 7979 op->func(ip, parent_ip, op, fregs); 7980 7981 trace_clear_recursion(bit); 7982 } 7983 NOKPROBE_SYMBOL(ftrace_ops_assist_func); 7984 7985 /** 7986 * ftrace_ops_get_func - get the function a trampoline should call 7987 * @ops: the ops to get the function for 7988 * 7989 * Normally the mcount trampoline will call the ops->func, but there 7990 * are times that it should not. For example, if the ops does not 7991 * have its own recursion protection, then it should call the 7992 * ftrace_ops_assist_func() instead. 7993 * 7994 * Returns: the function that the trampoline should call for @ops. 7995 */ 7996 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) 7997 { 7998 /* 7999 * If the function does not handle recursion or needs to be RCU safe, 8000 * then we need to call the assist handler.
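 *
 * For example (per the check below), an ops registered with
 * FTRACE_OPS_FL_RECURSION or FTRACE_OPS_FL_RCU set is given
 * ftrace_ops_assist_func(), which performs the recursion test and the
 * rcu_is_watching() check before invoking ops->func.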
8001 */ 8002 if (ops->flags & (FTRACE_OPS_FL_RECURSION | 8003 FTRACE_OPS_FL_RCU)) 8004 return ftrace_ops_assist_func; 8005 8006 return ops->func; 8007 } 8008 8009 static void 8010 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, 8011 struct task_struct *prev, 8012 struct task_struct *next, 8013 unsigned int prev_state) 8014 { 8015 struct trace_array *tr = data; 8016 struct trace_pid_list *pid_list; 8017 struct trace_pid_list *no_pid_list; 8018 8019 pid_list = rcu_dereference_sched(tr->function_pids); 8020 no_pid_list = rcu_dereference_sched(tr->function_no_pids); 8021 8022 if (trace_ignore_this_task(pid_list, no_pid_list, next)) 8023 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 8024 FTRACE_PID_IGNORE); 8025 else 8026 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 8027 next->pid); 8028 } 8029 8030 static void 8031 ftrace_pid_follow_sched_process_fork(void *data, 8032 struct task_struct *self, 8033 struct task_struct *task) 8034 { 8035 struct trace_pid_list *pid_list; 8036 struct trace_array *tr = data; 8037 8038 pid_list = rcu_dereference_sched(tr->function_pids); 8039 trace_filter_add_remove_task(pid_list, self, task); 8040 8041 pid_list = rcu_dereference_sched(tr->function_no_pids); 8042 trace_filter_add_remove_task(pid_list, self, task); 8043 } 8044 8045 static void 8046 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) 8047 { 8048 struct trace_pid_list *pid_list; 8049 struct trace_array *tr = data; 8050 8051 pid_list = rcu_dereference_sched(tr->function_pids); 8052 trace_filter_add_remove_task(pid_list, NULL, task); 8053 8054 pid_list = rcu_dereference_sched(tr->function_no_pids); 8055 trace_filter_add_remove_task(pid_list, NULL, task); 8056 } 8057 8058 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) 8059 { 8060 if (enable) { 8061 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 8062 tr); 8063 register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, 8064 tr); 8065 } else { 8066 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 8067 tr); 8068 unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, 8069 tr); 8070 } 8071 } 8072 8073 static void clear_ftrace_pids(struct trace_array *tr, int type) 8074 { 8075 struct trace_pid_list *pid_list; 8076 struct trace_pid_list *no_pid_list; 8077 int cpu; 8078 8079 pid_list = rcu_dereference_protected(tr->function_pids, 8080 lockdep_is_held(&ftrace_lock)); 8081 no_pid_list = rcu_dereference_protected(tr->function_no_pids, 8082 lockdep_is_held(&ftrace_lock)); 8083 8084 /* Make sure there's something to do */ 8085 if (!pid_type_enabled(type, pid_list, no_pid_list)) 8086 return; 8087 8088 /* See if the pids still need to be checked after this */ 8089 if (!still_need_pid_events(type, pid_list, no_pid_list)) { 8090 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 8091 for_each_possible_cpu(cpu) 8092 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; 8093 } 8094 8095 if (type & TRACE_PIDS) 8096 rcu_assign_pointer(tr->function_pids, NULL); 8097 8098 if (type & TRACE_NO_PIDS) 8099 rcu_assign_pointer(tr->function_no_pids, NULL); 8100 8101 /* Wait till all users are no longer using pid filtering */ 8102 synchronize_rcu(); 8103 8104 if ((type & TRACE_PIDS) && pid_list) 8105 trace_pid_list_free(pid_list); 8106 8107 if ((type & TRACE_NO_PIDS) && no_pid_list) 8108 trace_pid_list_free(no_pid_list); 8109 } 8110 8111 void ftrace_clear_pids(struct trace_array 
*tr) 8112 { 8113 mutex_lock(&ftrace_lock); 8114 8115 clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS); 8116 8117 mutex_unlock(&ftrace_lock); 8118 } 8119 8120 static void ftrace_pid_reset(struct trace_array *tr, int type) 8121 { 8122 mutex_lock(&ftrace_lock); 8123 clear_ftrace_pids(tr, type); 8124 8125 ftrace_update_pid_func(); 8126 ftrace_startup_all(0); 8127 8128 mutex_unlock(&ftrace_lock); 8129 } 8130 8131 /* Greater than any max PID */ 8132 #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1) 8133 8134 static void *fpid_start(struct seq_file *m, loff_t *pos) 8135 __acquires(RCU) 8136 { 8137 struct trace_pid_list *pid_list; 8138 struct trace_array *tr = m->private; 8139 8140 mutex_lock(&ftrace_lock); 8141 rcu_read_lock_sched(); 8142 8143 pid_list = rcu_dereference_sched(tr->function_pids); 8144 8145 if (!pid_list) 8146 return !(*pos) ? FTRACE_NO_PIDS : NULL; 8147 8148 return trace_pid_start(pid_list, pos); 8149 } 8150 8151 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) 8152 { 8153 struct trace_array *tr = m->private; 8154 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); 8155 8156 if (v == FTRACE_NO_PIDS) { 8157 (*pos)++; 8158 return NULL; 8159 } 8160 return trace_pid_next(pid_list, v, pos); 8161 } 8162 8163 static void fpid_stop(struct seq_file *m, void *p) 8164 __releases(RCU) 8165 { 8166 rcu_read_unlock_sched(); 8167 mutex_unlock(&ftrace_lock); 8168 } 8169 8170 static int fpid_show(struct seq_file *m, void *v) 8171 { 8172 if (v == FTRACE_NO_PIDS) { 8173 seq_puts(m, "no pid\n"); 8174 return 0; 8175 } 8176 8177 return trace_pid_show(m, v); 8178 } 8179 8180 static const struct seq_operations ftrace_pid_sops = { 8181 .start = fpid_start, 8182 .next = fpid_next, 8183 .stop = fpid_stop, 8184 .show = fpid_show, 8185 }; 8186 8187 static void *fnpid_start(struct seq_file *m, loff_t *pos) 8188 __acquires(RCU) 8189 { 8190 struct trace_pid_list *pid_list; 8191 struct trace_array *tr = m->private; 8192 8193 mutex_lock(&ftrace_lock); 8194 rcu_read_lock_sched(); 8195 8196 pid_list = rcu_dereference_sched(tr->function_no_pids); 8197 8198 if (!pid_list) 8199 return !(*pos) ? 
FTRACE_NO_PIDS : NULL; 8200 8201 return trace_pid_start(pid_list, pos); 8202 } 8203 8204 static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos) 8205 { 8206 struct trace_array *tr = m->private; 8207 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids); 8208 8209 if (v == FTRACE_NO_PIDS) { 8210 (*pos)++; 8211 return NULL; 8212 } 8213 return trace_pid_next(pid_list, v, pos); 8214 } 8215 8216 static const struct seq_operations ftrace_no_pid_sops = { 8217 .start = fnpid_start, 8218 .next = fnpid_next, 8219 .stop = fpid_stop, 8220 .show = fpid_show, 8221 }; 8222 8223 static int pid_open(struct inode *inode, struct file *file, int type) 8224 { 8225 const struct seq_operations *seq_ops; 8226 struct trace_array *tr = inode->i_private; 8227 struct seq_file *m; 8228 int ret = 0; 8229 8230 ret = tracing_check_open_get_tr(tr); 8231 if (ret) 8232 return ret; 8233 8234 if ((file->f_mode & FMODE_WRITE) && 8235 (file->f_flags & O_TRUNC)) 8236 ftrace_pid_reset(tr, type); 8237 8238 switch (type) { 8239 case TRACE_PIDS: 8240 seq_ops = &ftrace_pid_sops; 8241 break; 8242 case TRACE_NO_PIDS: 8243 seq_ops = &ftrace_no_pid_sops; 8244 break; 8245 default: 8246 trace_array_put(tr); 8247 WARN_ON_ONCE(1); 8248 return -EINVAL; 8249 } 8250 8251 ret = seq_open(file, seq_ops); 8252 if (ret < 0) { 8253 trace_array_put(tr); 8254 } else { 8255 m = file->private_data; 8256 /* copy tr over to seq ops */ 8257 m->private = tr; 8258 } 8259 8260 return ret; 8261 } 8262 8263 static int 8264 ftrace_pid_open(struct inode *inode, struct file *file) 8265 { 8266 return pid_open(inode, file, TRACE_PIDS); 8267 } 8268 8269 static int 8270 ftrace_no_pid_open(struct inode *inode, struct file *file) 8271 { 8272 return pid_open(inode, file, TRACE_NO_PIDS); 8273 } 8274 8275 static void ignore_task_cpu(void *data) 8276 { 8277 struct trace_array *tr = data; 8278 struct trace_pid_list *pid_list; 8279 struct trace_pid_list *no_pid_list; 8280 8281 /* 8282 * This function is called by on_each_cpu() while the 8283 * ftrace_lock is held.
8284 */ 8285 pid_list = rcu_dereference_protected(tr->function_pids, 8286 mutex_is_locked(&ftrace_lock)); 8287 no_pid_list = rcu_dereference_protected(tr->function_no_pids, 8288 mutex_is_locked(&ftrace_lock)); 8289 8290 if (trace_ignore_this_task(pid_list, no_pid_list, current)) 8291 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 8292 FTRACE_PID_IGNORE); 8293 else 8294 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 8295 current->pid); 8296 } 8297 8298 static ssize_t 8299 pid_write(struct file *filp, const char __user *ubuf, 8300 size_t cnt, loff_t *ppos, int type) 8301 { 8302 struct seq_file *m = filp->private_data; 8303 struct trace_array *tr = m->private; 8304 struct trace_pid_list *filtered_pids; 8305 struct trace_pid_list *other_pids; 8306 struct trace_pid_list *pid_list; 8307 ssize_t ret; 8308 8309 if (!cnt) 8310 return 0; 8311 8312 guard(mutex)(&ftrace_lock); 8313 8314 switch (type) { 8315 case TRACE_PIDS: 8316 filtered_pids = rcu_dereference_protected(tr->function_pids, 8317 lockdep_is_held(&ftrace_lock)); 8318 other_pids = rcu_dereference_protected(tr->function_no_pids, 8319 lockdep_is_held(&ftrace_lock)); 8320 break; 8321 case TRACE_NO_PIDS: 8322 filtered_pids = rcu_dereference_protected(tr->function_no_pids, 8323 lockdep_is_held(&ftrace_lock)); 8324 other_pids = rcu_dereference_protected(tr->function_pids, 8325 lockdep_is_held(&ftrace_lock)); 8326 break; 8327 default: 8328 WARN_ON_ONCE(1); 8329 return -EINVAL; 8330 } 8331 8332 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); 8333 if (ret < 0) 8334 return ret; 8335 8336 switch (type) { 8337 case TRACE_PIDS: 8338 rcu_assign_pointer(tr->function_pids, pid_list); 8339 break; 8340 case TRACE_NO_PIDS: 8341 rcu_assign_pointer(tr->function_no_pids, pid_list); 8342 break; 8343 } 8344 8345 8346 if (filtered_pids) { 8347 synchronize_rcu(); 8348 trace_pid_list_free(filtered_pids); 8349 } else if (pid_list && !other_pids) { 8350 /* Register a probe to set whether to ignore the tracing of a task */ 8351 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 8352 } 8353 8354 /* 8355 * Ignoring of pids is done at task switch. But we have to 8356 * check for those tasks that are currently running. 8357 * Always do this in case a pid was appended or removed. 
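 * on_each_cpu() below runs ignore_task_cpu() on every CPU and waits for
 * completion, so each CPU's ftrace_ignore_pid is refreshed against the
 * task currently running there.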
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
}

static ssize_t
ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
}

static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static const struct file_operations ftrace_pid_fops = {
	.open = ftrace_pid_open,
	.write = ftrace_pid_write,
	.read = seq_read,
	.llseek = tracing_lseek,
	.release = ftrace_pid_release,
};

static const struct file_operations ftrace_no_pid_fops = {
	.open = ftrace_no_pid_open,
	.write = ftrace_no_pid_write,
	.read = seq_read,
	.llseek = tracing_lseek,
	.release = ftrace_pid_release,
};

void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
			  tr, &ftrace_pid_fops);
	trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
			  d_tracer, tr, &ftrace_no_pid_fops);
}

void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
					 struct dentry *d_tracer)
{
	/* Only the top level directory has the dyn_tracefs and profile */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));

	ftrace_init_dyn_tracefs(d_tracer);
	ftrace_profile_tracefs(d_tracer);
}

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is disabled in place, with no
 * clean shutdown of the registered callbacks.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	ftrace_trace_function = ftrace_stub;
	kprobe_ftrace_kill();
}

/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 *
 * Returns: 1 if ftrace is "dead", zero otherwise.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}
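/*
 * Usage sketch (not built): code that touches tracing state from an oops
 * or panic path is expected to bail out once ftrace has been killed. The
 * my_dump_helper() caller below is hypothetical:
 */
#if 0
	static void my_dump_helper(void)
	{
		if (ftrace_is_dead())
			return;	/* ftrace_kill() ran; leave tracing alone */
		/* ... otherwise it is safe to interact with tracing ... */
	}
#endif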
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
 * it doesn't conflict with any direct ftrace_ops. If there is an existing
 * direct ftrace_ops on a kernel function being patched, call
 * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
 *
 * @ops: ftrace_ops being registered.
 *
 * Returns:
 * 0 on success;
 * Negative on failure.
 */
static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *hash;
	struct ftrace_ops *op;
	int size, i, ret;

	lockdep_assert_held_once(&direct_mutex);

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	hash = ops->func_hash->filter_hash;
	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			unsigned long ip = entry->ip;
			bool found_op = false;

			mutex_lock(&ftrace_lock);
			do_for_each_ftrace_op(op, ftrace_ops_list) {
				if (!(op->flags & FTRACE_OPS_FL_DIRECT))
					continue;
				if (ops_references_ip(op, ip)) {
					found_op = true;
					break;
				}
			} while_for_each_ftrace_op(op);
			mutex_unlock(&ftrace_lock);

			if (found_op) {
				if (!op->ops_func)
					return -EBUSY;

				ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
				if (ret)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Similar to prepare_direct_functions_for_ipmodify(); clean up after an
 * ops with IPMODIFY is unregistered. The cleanup is optional for most
 * DIRECT ops.
 */
static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *hash;
	struct ftrace_ops *op;
	int size, i;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return;

	mutex_lock(&direct_mutex);

	hash = ops->func_hash->filter_hash;
	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			unsigned long ip = entry->ip;
			bool found_op = false;

			mutex_lock(&ftrace_lock);
			do_for_each_ftrace_op(op, ftrace_ops_list) {
				if (!(op->flags & FTRACE_OPS_FL_DIRECT))
					continue;
				if (ops_references_ip(op, ip)) {
					found_op = true;
					break;
				}
			} while_for_each_ftrace_op(op);
			mutex_unlock(&ftrace_lock);

			/* The cleanup is optional, ignore any errors */
			if (found_op && op->ops_func)
				op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
		}
	}
	mutex_unlock(&direct_mutex);
}

#define lock_direct_mutex()	mutex_lock(&direct_mutex)
#define unlock_direct_mutex()	mutex_unlock(&direct_mutex)

#else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
{
	return 0;
}

static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
{
}

#define lock_direct_mutex()	do { } while (0)
#define unlock_direct_mutex()	do { } while (0)

#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

/*
 * Similar to register_ftrace_function(), except we don't lock direct_mutex.
 */
static int register_ftrace_function_nolock(struct ftrace_ops *ops)
{
	int ret;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel (subject to any filters set on @ops).
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	lock_direct_mutex();
	ret = prepare_direct_functions_for_ipmodify(ops);
	if (ret < 0)
		goto out_unlock;

	ret = register_ftrace_function_nolock(ops);

out_unlock:
	unlock_direct_mutex();
	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	cleanup_direct_functions_after_ipmodify(ops);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
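/*
 * Minimal registration sketch (not built): a callback registered through
 * register_ftrace_function() runs at the entry of every traced function,
 * so it and everything it calls must be notrace, as the kernel-doc above
 * warns. The my_* names are illustrative only:
 */
#if 0
	static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
					struct ftrace_ops *op, struct ftrace_regs *fregs)
	{
		/* keep this path short and non-recursive */
	}

	static struct ftrace_ops my_ops = {
		.func = my_callback,
	};

	/* typically from module init ... */
	register_ftrace_function(&my_ops);
	/* ... and the matching module exit */
	unregister_ftrace_function(&my_ops);
#endif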
static int symbols_cmp(const void *a, const void *b)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

struct kallsyms_data {
	unsigned long *addrs;
	const char **syms;
	size_t cnt;
	size_t found;
};

/*
 * This function gets called for all kernel and module symbols
 * and returns 1 in case we resolved all the requested symbols,
 * 0 otherwise.
 */
static int kallsyms_callback(void *data, const char *name, unsigned long addr)
{
	struct kallsyms_data *args = data;
	const char **sym;
	int idx;

	sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
	if (!sym)
		return 0;

	idx = sym - args->syms;
	if (args->addrs[idx])
		return 0;

	if (!ftrace_location(addr))
		return 0;

	args->addrs[idx] = addr;
	args->found++;
	return args->found == args->cnt ? 1 : 0;
}

/**
 * ftrace_lookup_symbols - Look up addresses for an array of symbols
 *
 * @sorted_syms: array of symbol name pointers to resolve,
 *		 must be alphabetically sorted
 * @cnt: number of symbols/addresses in the @sorted_syms/@addrs arrays
 * @addrs: array for storing the resulting addresses
 *
 * This function looks up addresses for the symbols provided in the
 * @sorted_syms array (which must be alphabetically sorted) and stores
 * them in the @addrs array, which needs to be big enough to store at
 * least @cnt addresses.
 *
 * Returns: 0 if all provided symbols are found, -ESRCH otherwise.
 */
int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
	struct kallsyms_data args;
	int found_all;

	memset(addrs, 0, sizeof(*addrs) * cnt);
	args.addrs = addrs;
	args.syms = sorted_syms;
	args.cnt = cnt;
	args.found = 0;

	found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
	if (found_all)
		return 0;
	found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
	return found_all ? 0 : -ESRCH;
}
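/*
 * Usage sketch (not built): a caller sorts its symbol array first, reusing
 * symbols_cmp() as the sort comparator, then resolves all the addresses in
 * a single kallsyms walk. The my_syms name is illustrative only:
 */
#if 0
	static const char *my_syms[] = { "vfs_read", "vfs_write" };
	unsigned long addrs[ARRAY_SIZE(my_syms)];
	int err;

	sort(my_syms, ARRAY_SIZE(my_syms), sizeof(char *), symbols_cmp, NULL);
	err = ftrace_lookup_symbols(my_syms, ARRAY_SIZE(my_syms), addrs);
	if (err)
		return err;	/* -ESRCH: a symbol is missing or not traceable */
#endif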
#ifdef CONFIG_SYSCTL

#ifdef CONFIG_DYNAMIC_FTRACE
static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}
#else
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static bool is_permanent_ops_registered(void)
{
	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PERMANENT)
			return true;
	} while_for_each_ftrace_op(op);

	return false;
}

static int
ftrace_enable_sysctl(const struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	guard(mutex)(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		return ret;

	if (ftrace_enabled) {
		/* we are starting ftrace again */
		if (rcu_dereference_protected(ftrace_ops_list,
			      lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();
	} else {
		if (is_permanent_ops_registered()) {
			ftrace_enabled = true;
			return -EBUSY;
		}

		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

	last_ftrace_enabled = !!ftrace_enabled;
	return 0;
}

static struct ctl_table ftrace_sysctls[] = {
	{
		.procname	= "ftrace_enabled",
		.data		= &ftrace_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= ftrace_enable_sysctl,
	},
};

static int __init ftrace_sysctl_init(void)
{
	register_sysctl_init("kernel", ftrace_sysctls);
	return 0;
}
late_initcall(ftrace_sysctl_init);
#endif
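/*
 * The handler above backs /proc/sys/kernel/ftrace_enabled. Typical usage
 * from user space looks like:
 *
 *	# disable the function tracer at run time
 *	sysctl kernel.ftrace_enabled=0
 *
 *	# re-enable it
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 *
 * Note that disabling fails with -EBUSY while an ops with
 * FTRACE_OPS_FL_PERMANENT (e.g. a live patch) is registered.
 */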