// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <linux/rcupdate.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "state.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 * - __klp_sched_try_switch()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module still can be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
        return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
        struct module *mod;

        if (!klp_is_module(obj))
                return;

        guard(rcu)();
        /*
         * We do not want to block removal of patched modules and therefore
         * we do not take a reference here. The patches are removed by
         * klp_module_going() instead.
         */
        mod = find_module(obj->name);
        /*
         * Do not interfere with the work of klp_module_coming() and
         * klp_module_going(). Note that the patch might still be needed
         * before klp_module_going() is called. Module functions can be
         * called even in the GOING state until mod->exit() finishes. This
         * is especially important for patches that modify the semantics
         * of the patched functions.
         */
        if (mod && mod->klp_alive)
                obj->mod = mod;
}

static bool klp_initialized(void)
{
        return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
                                      struct klp_func *old_func)
{
        struct klp_func *func;

        klp_for_each_func(obj, func) {
                if ((strcmp(old_func->old_name, func->old_name) == 0) &&
                    (old_func->old_sympos == func->old_sympos)) {
                        return func;
                }
        }

        return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
                                          struct klp_object *old_obj)
{
        struct klp_object *obj;

        klp_for_each_object(patch, obj) {
                if (klp_is_module(old_obj)) {
                        if (klp_is_module(obj) &&
                            strcmp(old_obj->name, obj->name) == 0) {
                                return obj;
                        }
                } else if (!klp_is_module(obj)) {
                        return obj;
                }
        }

        return NULL;
}

struct klp_find_arg {
        const char *name;
        unsigned long addr;
        unsigned long count;
        unsigned long pos;
};

static int klp_match_callback(void *data, unsigned long addr)
{
        struct klp_find_arg *args = data;

        args->addr = addr;
        args->count++;

        /*
         * Finish the search when the symbol is found for the desired position
         * or the position is not defined for a non-unique symbol.
         */
        if ((args->pos && (args->count == args->pos)) ||
            (!args->pos && (args->count > 1)))
                return 1;

        return 0;
}

static int klp_find_callback(void *data, const char *name, unsigned long addr)
{
        struct klp_find_arg *args = data;

        if (strcmp(args->name, name))
                return 0;

        return klp_match_callback(data, addr);
}

static int klp_find_object_symbol(const char *objname, const char *name,
                                  unsigned long sympos, unsigned long *addr)
{
        struct klp_find_arg args = {
                .name = name,
                .addr = 0,
                .count = 0,
                .pos = sympos,
        };

        if (objname)
                module_kallsyms_on_each_symbol(objname, klp_find_callback, &args);
        else
                kallsyms_on_each_match_symbol(klp_match_callback, name, &args);

        /*
         * Ensure an address was found. If sympos is 0, ensure symbol is unique;
         * otherwise ensure the symbol position count matches sympos.
         */
        if (args.addr == 0)
                pr_err("symbol '%s' not found in symbol table\n", name);
        else if (args.count > 1 && sympos == 0) {
                pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
                       name, objname);
        } else if (sympos != args.count && sympos > 0) {
                pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
                       sympos, name, objname ? objname : "vmlinux");
        } else {
                *addr = args.addr;
                return 0;
        }

        *addr = 0;
        return -EINVAL;
}
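
/*
 * Example (illustrative, not part of the original source): looking up the
 * unique vmlinux symbol "vfs_read" would be done with
 *
 *	klp_find_object_symbol(NULL, "vfs_read", 0, &addr);
 *
 * while the second occurrence of a non-unique symbol "foo" in a hypothetical
 * module "bar" would use
 *
 *	klp_find_object_symbol("bar", "foo", 2, &addr);
 */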
static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
                               unsigned int symndx, Elf_Shdr *relasec,
                               const char *sec_objname)
{
        int i, cnt, ret;
        char sym_objname[MODULE_NAME_LEN];
        char sym_name[KSYM_NAME_LEN];
        Elf_Rela *relas;
        Elf_Sym *sym;
        unsigned long sympos, addr;
        bool sym_vmlinux;
        bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");

        /*
         * Since the field widths for sym_objname and sym_name in the sscanf()
         * call are hard-coded and correspond to MODULE_NAME_LEN and
         * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
         * and KSYM_NAME_LEN have the values we expect them to have.
         *
         * Because the value of MODULE_NAME_LEN can differ among architectures,
         * we use the smallest/strictest upper bound possible (56, based on
         * the current definition of MODULE_NAME_LEN) to prevent overflows.
         */
        BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);

        relas = (Elf_Rela *) relasec->sh_addr;
        /* For each rela in this klp relocation section */
        for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
                sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
                if (sym->st_shndx != SHN_LIVEPATCH) {
                        pr_err("symbol %s is not marked as a livepatch symbol\n",
                               strtab + sym->st_name);
                        return -EINVAL;
                }

                /* Format: .klp.sym.sym_objname.sym_name,sympos */
                cnt = sscanf(strtab + sym->st_name,
                             ".klp.sym.%55[^.].%511[^,],%lu",
                             sym_objname, sym_name, &sympos);
                if (cnt != 3) {
                        pr_err("symbol %s has an incorrectly formatted name\n",
                               strtab + sym->st_name);
                        return -EINVAL;
                }

                sym_vmlinux = !strcmp(sym_objname, "vmlinux");

                /*
                 * Prevent module-specific KLP rela sections from referencing
                 * vmlinux symbols. This helps prevent ordering issues with
                 * module special section initializations. Presumably such
                 * symbols are exported and normal relas can be used instead.
                 */
                if (!sec_vmlinux && sym_vmlinux) {
                        pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
                               sym_name);
                        return -EINVAL;
                }

                /* klp_find_object_symbol() treats a NULL objname as vmlinux */
                ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
                                             sym_name, sympos, &addr);
                if (ret)
                        return ret;

                sym->st_value = addr;
        }

        return 0;
}
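
/*
 * Example (illustrative): a livepatch symbol named
 *
 *	.klp.sym.vmlinux.vfs_read,0
 *
 * parses as sym_objname = "vmlinux", sym_name = "vfs_read" and sympos = 0,
 * i.e. the unique vmlinux symbol vfs_read.
 */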
void __weak clear_relocate_add(Elf_Shdr *sechdrs,
                               const char *strtab,
                               unsigned int symindex,
                               unsigned int relsec,
                               struct module *me)
{
}

/*
 * At a high level, there are two types of klp relocation sections: those which
 * reference symbols which live in vmlinux; and those which reference symbols
 * which live in other modules. This function is called for both types:
 *
 * 1) When a klp module itself loads, the module code calls this function to
 *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
 *    These relocations are written to the klp module text to allow the patched
 *    code/data to reference unexported vmlinux symbols. They're written as
 *    early as possible to ensure that other module init code (e.g.,
 *    jump_label_apply_nops) can access any unexported vmlinux symbols which
 *    might be referenced by the klp module's special sections.
 *
 * 2) When a to-be-patched module loads -- or is already loaded when a
 *    corresponding klp module loads -- klp code calls this function to write
 *    module-specific klp relocations (.klp.rela.{module}.* sections). These
 *    are written to the klp module text to allow the patched code/data to
 *    reference symbols which live in the to-be-patched module or one of its
 *    module dependencies. Exported symbols are supported, in addition to
 *    unexported symbols, in order to enable late module patching, which allows
 *    the to-be-patched module to be loaded and patched sometime *after* the
 *    klp module is loaded.
 */
static int klp_write_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
                                    const char *shstrtab, const char *strtab,
                                    unsigned int symndx, unsigned int secndx,
                                    const char *objname, bool apply)
{
        int cnt, ret;
        char sec_objname[MODULE_NAME_LEN];
        Elf_Shdr *sec = sechdrs + secndx;

        /*
         * Format: .klp.rela.sec_objname.section_name
         * See comment in klp_resolve_symbols() for an explanation
         * of the selected field width value.
         */
        cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
                     sec_objname);
        if (cnt != 1) {
                pr_err("section %s has an incorrectly formatted name\n",
                       shstrtab + sec->sh_name);
                return -EINVAL;
        }

        if (strcmp(objname ? objname : "vmlinux", sec_objname))
                return 0;

        if (apply) {
                ret = klp_resolve_symbols(sechdrs, strtab, symndx,
                                          sec, sec_objname);
                if (ret)
                        return ret;

                return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
        }

        clear_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
        return 0;
}

int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
                             const char *shstrtab, const char *strtab,
                             unsigned int symndx, unsigned int secndx,
                             const char *objname)
{
        return klp_write_section_relocs(pmod, sechdrs, shstrtab, strtab, symndx,
                                        secndx, objname, true);
}
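
/*
 * Example (illustrative): a relocation section named
 *
 *	.klp.rela.ext4.text
 *
 * is only processed when this function is called for the "ext4" object;
 * .klp.rela.vmlinux.* sections are handled when objname is NULL.
 */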
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/replace
 * /sys/kernel/livepatch/<patch>/stack_order
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/patched
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
                             const char *buf, size_t count)
{
        struct klp_patch *patch;
        int ret;
        bool enabled;

        ret = kstrtobool(buf, &enabled);
        if (ret)
                return ret;

        patch = container_of(kobj, struct klp_patch, kobj);

        mutex_lock(&klp_mutex);

        if (patch->enabled == enabled) {
                /* already in requested state */
                ret = -EINVAL;
                goto out;
        }

        /*
         * Allow reversing a pending transition in either direction. It might
         * be necessary to complete the transition without forcing it and
         * breaking system integrity.
         *
         * Do not allow re-enabling a disabled patch.
         */
        if (patch == klp_transition_patch)
                klp_reverse_transition();
        else if (!enabled)
                ret = __klp_disable_patch(patch);
        else
                ret = -EINVAL;

out:
        mutex_unlock(&klp_mutex);

        if (ret)
                return ret;
        return count;
}
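
/*
 * Illustrative usage (shell, not part of the original source):
 *
 *	# disable (and eventually remove) a loaded livepatch
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled
 *
 * Writing 1 only reverses a still-pending disable transition; a fully
 * disabled patch cannot be re-enabled through this file.
 */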
static ssize_t enabled_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        return sysfs_emit(buf, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        return sysfs_emit(buf, "%d\n", patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
                           const char *buf, size_t count)
{
        struct klp_patch *patch;
        int ret;
        bool val;

        ret = kstrtobool(buf, &val);
        if (ret)
                return ret;

        if (!val)
                return count;

        mutex_lock(&klp_mutex);

        patch = container_of(kobj, struct klp_patch, kobj);
        if (patch != klp_transition_patch) {
                mutex_unlock(&klp_mutex);
                return -EINVAL;
        }

        klp_force_transition();

        mutex_unlock(&klp_mutex);

        return count;
}

static ssize_t replace_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        return sysfs_emit(buf, "%d\n", patch->replace);
}

static ssize_t stack_order_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
{
        struct klp_patch *patch, *this_patch;
        int stack_order = 0;

        this_patch = container_of(kobj, struct klp_patch, kobj);

        mutex_lock(&klp_mutex);

        klp_for_each_patch(patch) {
                stack_order++;
                if (patch == this_patch)
                        break;
        }

        mutex_unlock(&klp_mutex);

        return sysfs_emit(buf, "%d\n", stack_order);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
static struct kobj_attribute stack_order_kobj_attr = __ATTR_RO(stack_order);
static struct attribute *klp_patch_attrs[] = {
        &enabled_kobj_attr.attr,
        &transition_kobj_attr.attr,
        &force_kobj_attr.attr,
        &replace_kobj_attr.attr,
        &stack_order_kobj_attr.attr,
        NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static ssize_t patched_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct klp_object *obj;

        obj = container_of(kobj, struct klp_object, kobj);
        return sysfs_emit(buf, "%d\n", obj->patched);
}

static struct kobj_attribute patched_kobj_attr = __ATTR_RO(patched);
static struct attribute *klp_object_attrs[] = {
        &patched_kobj_attr.attr,
        NULL,
};
ATTRIBUTE_GROUPS(klp_object);

static void klp_free_object_dynamic(struct klp_object *obj)
{
        kfree(obj->name);
        kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
                                struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
                                  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
                                                   struct klp_patch *patch)
{
        struct klp_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return NULL;

        if (name) {
                obj->name = kstrdup(name, GFP_KERNEL);
                if (!obj->name) {
                        kfree(obj);
                        return NULL;
                }
        }

        klp_init_object_early(patch, obj);
        obj->dynamic = true;

        return obj;
}
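
/*
 * Note (added for clarity): dynamically allocated objects are not freed
 * directly. klp_init_object_early() initializes obj->kobj, so the memory is
 * released from the kobject release callback (klp_kobj_release_object()
 * below) once the last reference is dropped via kobject_put().
 */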
static void klp_free_func_nop(struct klp_func *func)
{
        kfree(func->old_name);
        kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
                                           struct klp_object *obj)
{
        struct klp_func *func;

        func = kzalloc(sizeof(*func), GFP_KERNEL);
        if (!func)
                return NULL;

        if (old_func->old_name) {
                func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
                if (!func->old_name) {
                        kfree(func);
                        return NULL;
                }
        }

        klp_init_func_early(obj, func);
        /*
         * func->new_func is the same as func->old_func. These addresses are
         * set when the object is loaded, see klp_init_object_loaded().
         */
        func->old_sympos = old_func->old_sympos;
        func->nop = true;

        return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
                               struct klp_object *old_obj)
{
        struct klp_object *obj;
        struct klp_func *func, *old_func;

        obj = klp_find_object(patch, old_obj);

        if (!obj) {
                obj = klp_alloc_object_dynamic(old_obj->name, patch);
                if (!obj)
                        return -ENOMEM;
        }

        klp_for_each_func(old_obj, old_func) {
                func = klp_find_func(obj, old_func);
                if (func)
                        continue;

                func = klp_alloc_func_nop(old_func, obj);
                if (!func)
                        return -ENOMEM;
        }

        return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run the
 * original function.
 *
 * They are added only when the atomic replace mode is used and only for
 * functions which are currently livepatched but are no longer included
 * in the new livepatch.
 */
static int klp_add_nops(struct klp_patch *patch)
{
        struct klp_patch *old_patch;
        struct klp_object *old_obj;

        klp_for_each_patch(old_patch) {
                klp_for_each_object(old_patch, old_obj) {
                        int err;

                        err = klp_add_object_nops(patch, old_obj);
                        if (err)
                                return err;
                }
        }

        return 0;
}
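
/*
 * Example (illustrative): if patch v1 livepatches vfs_read() and vfs_write(),
 * and a replace patch v2 only livepatches vfs_read(), klp_add_nops() adds a
 * dynamic nop entry for vfs_write() to v2. During the transition, tasks are
 * thereby switched back to the original vfs_write() implementation.
 */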
static void klp_kobj_release_patch(struct kobject *kobj)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        complete(&patch->finish);
}

static const struct kobj_type klp_ktype_patch = {
        .release = klp_kobj_release_patch,
        .sysfs_ops = &kobj_sysfs_ops,
        .default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
        struct klp_object *obj;

        obj = container_of(kobj, struct klp_object, kobj);

        if (obj->dynamic)
                klp_free_object_dynamic(obj);
}

static const struct kobj_type klp_ktype_object = {
        .release = klp_kobj_release_object,
        .sysfs_ops = &kobj_sysfs_ops,
        .default_groups = klp_object_groups,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
        struct klp_func *func;

        func = container_of(kobj, struct klp_func, kobj);

        if (func->nop)
                klp_free_func_nop(func);
}

static const struct kobj_type klp_ktype_func = {
        .release = klp_kobj_release_func,
        .sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
        struct klp_func *func, *tmp_func;

        klp_for_each_func_safe(obj, func, tmp_func) {
                if (nops_only && !func->nop)
                        continue;

                list_del(&func->node);
                kobject_put(&func->kobj);
        }
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
        struct klp_func *func;

        obj->mod = NULL;

        klp_for_each_func(obj, func) {
                func->old_func = NULL;

                if (func->nop)
                        func->new_func = NULL;
        }
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
        struct klp_object *obj, *tmp_obj;

        klp_for_each_object_safe(patch, obj, tmp_obj) {
                __klp_free_funcs(obj, nops_only);

                if (nops_only && !obj->dynamic)
                        continue;

                list_del(&obj->node);
                kobject_put(&obj->kobj);
        }
}

static void klp_free_objects(struct klp_patch *patch)
{
        __klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
        __klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
static void klp_free_patch_start(struct klp_patch *patch)
{
        if (!list_empty(&patch->list))
                list_del(&patch->list);

        klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(). And it has to be
 * the last function accessing the livepatch structures when the patch
 * gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
        /*
         * Avoid deadlock with enabled_store() sysfs callback by
         * calling this outside klp_mutex. It is safe because
         * this is called when the patch gets disabled and it
         * cannot get enabled again.
         */
        kobject_put(&patch->kobj);
        wait_for_completion(&patch->finish);

        /* Put the module after the last access to struct klp_patch. */
        if (!patch->forced)
                module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the patch.
 * This work item allows waiting for the interface to be destroyed in a
 * separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
        struct klp_patch *patch =
                container_of(work, struct klp_patch, free_work);

        klp_free_patch_finish(patch);
}

void klp_free_patch_async(struct klp_patch *patch)
{
        klp_free_patch_start(patch);
        schedule_work(&patch->free_work);
}

void klp_free_replaced_patches_async(struct klp_patch *new_patch)
{
        struct klp_patch *old_patch, *tmp_patch;

        klp_for_each_patch_safe(old_patch, tmp_patch) {
                if (old_patch == new_patch)
                        return;
                klp_free_patch_async(old_patch);
        }
}
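
/*
 * Note (added for clarity): the asynchronous variant exists because freeing
 * may be triggered from the patch's own sysfs interface, e.g. when disabling
 * is initiated from enabled_store(). klp_free_patch_finish() waits for the
 * kobject to go away, which must not happen from that very sysfs callback,
 * so the final cleanup is deferred to workqueue context.
 */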
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
        if (!func->old_name)
                return -EINVAL;

        /*
         * NOPs get the address later. The patched module must be loaded,
         * see klp_init_object_loaded().
         */
        if (!func->new_func && !func->nop)
                return -EINVAL;

        if (strlen(func->old_name) >= KSYM_NAME_LEN)
                return -EINVAL;

        INIT_LIST_HEAD(&func->stack_node);
        func->patched = false;
        func->transition = false;

        /*
         * The format for the sysfs directory is <function,sympos> where sympos
         * is the nth occurrence of this symbol in kallsyms for the patched
         * object. If the user selects 0 for old_sympos, then 1 will be used
         * since a unique symbol will be the first occurrence.
         */
        return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
                           func->old_name,
                           func->old_sympos ? func->old_sympos : 1);
}
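
/*
 * Example (illustrative): patching the unique vmlinux symbol vfs_read with
 * old_sympos == 0 creates the sysfs directory
 *
 *	/sys/kernel/livepatch/<patch>/vmlinux/vfs_read,1
 */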
static int klp_write_object_relocs(struct klp_patch *patch,
                                   struct klp_object *obj,
                                   bool apply)
{
        int i, ret;
        struct klp_modinfo *info = patch->mod->klp_info;

        for (i = 1; i < info->hdr.e_shnum; i++) {
                Elf_Shdr *sec = info->sechdrs + i;

                if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
                        continue;

                ret = klp_write_section_relocs(patch->mod, info->sechdrs,
                                               info->secstrings,
                                               patch->mod->core_kallsyms.strtab,
                                               info->symndx, i, obj->name, apply);
                if (ret)
                        return ret;
        }

        return 0;
}

static int klp_apply_object_relocs(struct klp_patch *patch,
                                   struct klp_object *obj)
{
        return klp_write_object_relocs(patch, obj, true);
}

static void klp_clear_object_relocs(struct klp_patch *patch,
                                    struct klp_object *obj)
{
        klp_write_object_relocs(patch, obj, false);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
                                  struct klp_object *obj)
{
        struct klp_func *func;
        int ret;

        if (klp_is_module(obj)) {
                /*
                 * Only write module-specific relocations here
                 * (.klp.rela.{module}.*). vmlinux-specific relocations were
                 * written earlier during the initialization of the klp module
                 * itself.
                 */
                ret = klp_apply_object_relocs(patch, obj);
                if (ret)
                        return ret;
        }

        klp_for_each_func(obj, func) {
                ret = klp_find_object_symbol(obj->name, func->old_name,
                                             func->old_sympos,
                                             (unsigned long *)&func->old_func);
                if (ret)
                        return ret;

                ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
                                                  &func->old_size, NULL);
                if (!ret) {
                        pr_err("kallsyms size lookup failed for '%s'\n",
                               func->old_name);
                        return -ENOENT;
                }

                if (func->nop)
                        func->new_func = func->old_func;

                ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
                                                  &func->new_size, NULL);
                if (!ret) {
                        pr_err("kallsyms size lookup failed for '%s' replacement\n",
                               func->old_name);
                        return -ENOENT;
                }
        }

        return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
        struct klp_func *func;
        int ret;
        const char *name;

        if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
                return -EINVAL;

        obj->patched = false;
        obj->mod = NULL;

        klp_find_object_module(obj);

        name = klp_is_module(obj) ? obj->name : "vmlinux";
        ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
        if (ret)
                return ret;

        klp_for_each_func(obj, func) {
                ret = klp_init_func(obj, func);
                if (ret)
                        return ret;
        }

        if (klp_is_object_loaded(obj))
                ret = klp_init_object_loaded(patch, obj);

        return ret;
}

static void klp_init_func_early(struct klp_object *obj,
                                struct klp_func *func)
{
        kobject_init(&func->kobj, &klp_ktype_func);
        list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
                                  struct klp_object *obj)
{
        INIT_LIST_HEAD(&obj->func_list);
        kobject_init(&obj->kobj, &klp_ktype_object);
        list_add_tail(&obj->node, &patch->obj_list);
}

static void klp_init_patch_early(struct klp_patch *patch)
{
        struct klp_object *obj;
        struct klp_func *func;

        INIT_LIST_HEAD(&patch->list);
        INIT_LIST_HEAD(&patch->obj_list);
        kobject_init(&patch->kobj, &klp_ktype_patch);
        patch->enabled = false;
        patch->forced = false;
        INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
        init_completion(&patch->finish);

        klp_for_each_object_static(patch, obj) {
                klp_init_object_early(patch, obj);

                klp_for_each_func_static(obj, func) {
                        klp_init_func_early(obj, func);
                }
        }
}

static int klp_init_patch(struct klp_patch *patch)
{
        struct klp_object *obj;
        int ret;

        ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
        if (ret)
                return ret;

        if (patch->replace) {
                ret = klp_add_nops(patch);
                if (ret)
                        return ret;
        }

        klp_for_each_object(patch, obj) {
                ret = klp_init_object(patch, obj);
                if (ret)
                        return ret;
        }

        list_add_tail(&patch->list, &klp_patches);

        return 0;
}
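
/*
 * Note (added for clarity): klp_init_patch_early() only initializes kobjects
 * and list heads and therefore cannot fail. This guarantees that
 * klp_free_patch_start()/klp_free_patch_finish() can safely clean up via
 * kobject_put() even when klp_init_patch() later fails half-way through.
 */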
static int __klp_disable_patch(struct klp_patch *patch)
{
        struct klp_object *obj;

        if (WARN_ON(!patch->enabled))
                return -EINVAL;

        if (klp_transition_patch)
                return -EBUSY;

        klp_init_transition(patch, KLP_TRANSITION_UNPATCHED);

        klp_for_each_object(patch, obj)
                if (obj->patched)
                        klp_pre_unpatch_callback(obj);

        /*
         * Enforce the order of the func->transition writes in
         * klp_init_transition() and the TIF_PATCH_PENDING writes in
         * klp_start_transition(). In the rare case where klp_ftrace_handler()
         * is called shortly after klp_update_patch_state() switches the task,
         * this ensures the handler sees that func->transition is set.
         */
        smp_wmb();

        klp_start_transition();
        patch->enabled = false;
        klp_try_complete_transition();

        return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
        struct klp_object *obj;
        int ret;

        if (klp_transition_patch)
                return -EBUSY;

        if (WARN_ON(patch->enabled))
                return -EINVAL;

        pr_notice("enabling patch '%s'\n", patch->mod->name);

        klp_init_transition(patch, KLP_TRANSITION_PATCHED);

        /*
         * Enforce the order of the func->transition writes in
         * klp_init_transition() and the ops->func_stack writes in
         * klp_patch_object(), so that klp_ftrace_handler() will see the
         * func->transition updates before the handler is registered and the
         * new funcs become visible to the handler.
         */
        smp_wmb();

        klp_for_each_object(patch, obj) {
                if (!klp_is_object_loaded(obj))
                        continue;

                ret = klp_pre_patch_callback(obj);
                if (ret) {
                        pr_warn("pre-patch callback failed for object '%s'\n",
                                klp_is_module(obj) ? obj->name : "vmlinux");
                        goto err;
                }

                ret = klp_patch_object(obj);
                if (ret) {
                        pr_warn("failed to patch object '%s'\n",
                                klp_is_module(obj) ? obj->name : "vmlinux");
                        goto err;
                }
        }

        klp_start_transition();
        patch->enabled = true;
        klp_try_complete_transition();

        return 0;
err:
        pr_warn("failed to enable patch '%s'\n", patch->mod->name);

        klp_cancel_transition();
        return ret;
}
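
/*
 * Note (added for clarity): the smp_wmb() calls above pair with read
 * barriers in klp_ftrace_handler(), which reads ops->func_stack and
 * func->transition in the opposite order.
 */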
/**
 * klp_enable_patch() - enable the livepatch
 * @patch: patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
        int ret;
        struct klp_object *obj;

        if (!patch || !patch->mod || !patch->objs)
                return -EINVAL;

        klp_for_each_object_static(patch, obj) {
                if (!obj->funcs)
                        return -EINVAL;
        }

        if (!is_livepatch_module(patch->mod)) {
                pr_err("module %s is not marked as a livepatch module\n",
                       patch->mod->name);
                return -EINVAL;
        }

        if (!klp_initialized())
                return -ENODEV;

        if (!klp_have_reliable_stack()) {
                pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
                pr_warn("The livepatch transition may never complete.\n");
        }

        mutex_lock(&klp_mutex);

        if (!klp_is_patch_compatible(patch)) {
                pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
                       patch->mod->name);
                mutex_unlock(&klp_mutex);
                return -EINVAL;
        }

        if (!try_module_get(patch->mod)) {
                mutex_unlock(&klp_mutex);
                return -ENODEV;
        }

        klp_init_patch_early(patch);

        ret = klp_init_patch(patch);
        if (ret)
                goto err;

        ret = __klp_enable_patch(patch);
        if (ret)
                goto err;

        mutex_unlock(&klp_mutex);

        return 0;

err:
        klp_free_patch_start(patch);

        mutex_unlock(&klp_mutex);

        klp_free_patch_finish(patch);

        return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
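
/*
 * Minimal caller sketch (illustrative; see samples/livepatch/ for a complete
 * example). A livepatch module fills in klp_func/klp_object/klp_patch tables
 * and calls klp_enable_patch() from its module_init() callback:
 *
 *	static struct klp_func funcs[] = {
 *		{ .old_name = "cmdline_proc_show",
 *		  .new_func = livepatch_cmdline_proc_show, },
 *		{ }
 *	};
 *	static struct klp_object objs[] = {
 *		{ .name = NULL, .funcs = funcs, },	// NULL means vmlinux
 *		{ }
 *	};
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 */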
/*
 * This function unpatches objects from the replaced livepatches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessed from the ftrace handler.
 * All functions are redirected by the klp_transition_patch. They either
 * use the new code or stay in the original code because of the special
 * nop function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system, which
 * is handled transparently: klp_free_patch_finish() skips module_put()
 * for forced patches.
 */
void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{
        struct klp_patch *old_patch;

        klp_for_each_patch(old_patch) {
                if (old_patch == new_patch)
                        return;

                old_patch->enabled = false;
                klp_unpatch_objects(old_patch);
        }
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding an unnecessary delay in the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
        klp_unpatch_objects_dynamic(klp_transition_patch);
        klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
                                               struct klp_patch *limit)
{
        struct klp_patch *patch;
        struct klp_object *obj;

        klp_for_each_patch(patch) {
                if (patch == limit)
                        break;

                klp_for_each_object(patch, obj) {
                        if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
                                continue;

                        if (patch != klp_transition_patch)
                                klp_pre_unpatch_callback(obj);

                        pr_notice("reverting patch '%s' on unloading module '%s'\n",
                                  patch->mod->name, obj->mod->name);
                        klp_unpatch_object(obj);

                        klp_post_unpatch_callback(obj);
                        klp_clear_object_relocs(patch, obj);
                        klp_free_object_loaded(obj);
                        break;
                }
        }
}

int klp_module_coming(struct module *mod)
{
        int ret;
        struct klp_patch *patch;
        struct klp_object *obj;

        if (WARN_ON(mod->state != MODULE_STATE_COMING))
                return -EINVAL;

        if (!strcmp(mod->name, "vmlinux")) {
                pr_err("vmlinux.ko: invalid module name\n");
                return -EINVAL;
        }

        mutex_lock(&klp_mutex);
        /*
         * Each module has to know that klp_module_coming()
         * has been called. We never know what module will
         * get patched by a new patch.
         */
        mod->klp_alive = true;

        klp_for_each_patch(patch) {
                klp_for_each_object(patch, obj) {
                        if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
                                continue;

                        obj->mod = mod;

                        ret = klp_init_object_loaded(patch, obj);
                        if (ret) {
                                pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
                                        patch->mod->name, obj->mod->name, ret);
                                goto err;
                        }

                        pr_notice("applying patch '%s' to loading module '%s'\n",
                                  patch->mod->name, obj->mod->name);

                        ret = klp_pre_patch_callback(obj);
                        if (ret) {
                                pr_warn("pre-patch callback failed for object '%s'\n",
                                        obj->name);
                                goto err;
                        }

                        ret = klp_patch_object(obj);
                        if (ret) {
                                pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
                                        patch->mod->name, obj->mod->name, ret);

                                klp_post_unpatch_callback(obj);
                                goto err;
                        }

                        if (patch != klp_transition_patch)
                                klp_post_patch_callback(obj);

                        break;
                }
        }

        mutex_unlock(&klp_mutex);

        return 0;

err:
        /*
         * If a patch is unsuccessfully applied, return
         * error to the module loader.
         */
        pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
                patch->mod->name, obj->mod->name, obj->mod->name);
        mod->klp_alive = false;
        obj->mod = NULL;
        klp_cleanup_module_patches_limited(mod, patch);
        mutex_unlock(&klp_mutex);

        return ret;
}
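
/*
 * Note (added for clarity): klp_module_coming() runs in MODULE_STATE_COMING,
 * i.e. after the to-be-patched module has been loaded and relocated but
 * before its init function runs, so the module is already patched by the
 * time it can execute code. A non-zero return makes the module load fail.
 */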
void klp_module_going(struct module *mod)
{
        if (WARN_ON(mod->state != MODULE_STATE_GOING &&
                    mod->state != MODULE_STATE_COMING))
                return;

        mutex_lock(&klp_mutex);
        /*
         * Each module has to know that klp_module_going()
         * has been called. We never know what module will
         * get patched by a new patch.
         */
        mod->klp_alive = false;

        klp_cleanup_module_patches_limited(mod, NULL);

        mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
        klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
        if (!klp_root_kobj)
                return -ENOMEM;

        return 0;
}

module_init(klp_init);