Lines Matching +full:cpu +full:- +full:offset (arch/x86/platform/uv/uv_nmi.c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
10 #include <linux/cpu.h>
35 * Handle system-wide NMI events generated by the global 'power nmi' command.
37 * Basic operation is to field the NMI interrupt on each CPU and wait
38 * until all CPUs have arrived in the NMI handler. If some CPUs do not
48 * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is
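
The comment block above describes the overall flow: confirm the NMI really came from the 'power nmi' source, rendezvous every CPU in the handler, carry out the selected action, then clean up. A minimal hedged sketch of that flow, reusing function names that appear later in this listing (the arguments and exact call sequence are assumptions, not the file's verbatim handler):

/* Hedged sketch of the overall flow described above; for illustration only. */
static int uv_nmi_flow_sketch(unsigned int reason, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int master;

	if (!uv_check_nmi(hub_nmi))		/* hub_nmi: this CPU's per-hub NMI state */
		return NMI_DONE;		/* not a 'power nmi': let other handlers run */

	master = (atomic_read(&uv_nmi_cpu) == cpu);	/* first CPU in? */
	uv_nmi_wait(master);			/* rendezvous: wait for all CPUs */
	uv_nmi_dump_state(cpu, regs, master);	/* selected action (dump/ips/kdump/kdb/health) */
	uv_clear_nmi(cpu);			/* per-CPU / per-hub cleanup on the way out */

	return NMI_HANDLED;
}
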
64 /* Non-zero indicates newer SMM NMI handler present */
81 #define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset)) argument
89 static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
90 static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
112 return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg)); in param_get_local64()
118 local64_set((local64_t *)kp->arg, 0); in param_set_local64()
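
These two callbacks expose a local64_t event counter through the module-parameter interface: a read prints the current value, and any write resets it to zero. A sketch of how such callbacks are usually wired up with module_param_cb() (the ops and parameter names below are illustrative assumptions, not taken from the listing):

/* Sketch: wiring the getter/setter above into a module parameter. */
static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};

static local64_t uv_nmi_count;			/* assumed counter name */
module_param_cb(nmi_count, &param_ops_local64, &uv_nmi_count, 0644);
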
184 { "dump", "dump process stack for each cpu" },
185 { "ips", "dump Inst Ptr info for each cpu" },
205 strncpy(arg, val, ACTION_LEN - 1); in param_set_action()
206 arg[ACTION_LEN - 1] = '\0'; in param_set_action()
223 pr_err("UV: %-8s - %s\n", in param_set_action()
225 return -EINVAL; in param_set_action()
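
param_set_action() copies the new value into a bounded local buffer, matches it against the action table, and on a mismatch lists the valid actions before returning -EINVAL. A hedged sketch of that validation loop (the table name valid_acts and the field names action/desc are inferred from the entries and the pr_err format above, not confirmed by the listing):

/* Hedged sketch of the validation in param_set_action(); not verbatim code. */
int i;

for (i = 0; i < ARRAY_SIZE(valid_acts); i++)
	if (!strcmp(arg, valid_acts[i].action))
		break;

if (i < ARRAY_SIZE(valid_acts)) {
	strscpy(uv_nmi_action, arg, sizeof(uv_nmi_action));	/* accept the new action */
	return 0;
}

pr_err("UV: invalid NMI action '%s', valid actions are:\n", arg);
for (i = 0; i < ARRAY_SIZE(valid_acts); i++)
	pr_err("UV: %-8s - %s\n", valid_acts[i].action, valid_acts[i].desc);
return -EINVAL;
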
249 uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0"; in uv_nmi_setup_mmrs()
259 uvh_nmi_mmrx_type = "OCRD1-EXTIO_INT0"; in uv_nmi_setup_mmrs()
290 hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr); in uv_nmi_test_mmr()
291 atomic_inc(&hub_nmi->read_mmr_count); in uv_nmi_test_mmr()
292 return !!(hub_nmi->nmi_value & nmi_mmr_pending); in uv_nmi_test_mmr()
312 static void uv_init_hubless_pch_io(int offset, int mask, int data) in uv_init_hubless_pch_io() argument
314 int *addr = PCH_PCR_GPIO_ADDRESS(offset); in uv_init_hubless_pch_io()
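
uv_init_hubless_pch_io() resolves the register address from the PCH base plus offset via the macro above and then, presumably, applies a masked read-modify-write to it. A hedged sketch of that pattern (it follows the macro in using plain int pointers):

/* Hedged sketch of the masked read-modify-write; not verbatim code. */
static void pch_io_rmw_sketch(int offset, int mask, int data)
{
	int *addr = PCH_PCR_GPIO_ADDRESS(offset);
	int val = readl(addr);			/* current register value */

	if (mask) {				/* update only the masked bits */
		val = (val & ~mask) | data;
		writel(val, addr);
	}
}
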
344 unsigned int offset; member
349 .offset = 0x84,
356 .offset = 0x104,
361 .offset = 0x124,
366 .offset = 0x144,
371 .offset = 0x164,
378 .offset = 0x114,
383 .offset = 0x134,
388 .offset = 0x154,
393 .offset = 0x174,
400 .offset = 0x4c0,
430 * = 1 # Disable the output buffer; i.e. Hi-Z
440 .offset = 0x4c4,
458 uv_init_hubless_pch_io(init_nmi[i].offset, in uv_init_hubless_pch_d0()
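
The hubless-PCH setup is table-driven: each init_nmi[] entry pairs a PCH PCR GPIO register offset with a mask/data update, and uv_init_hubless_pch_d0() walks the table applying every entry through uv_init_hubless_pch_io(). A hedged sketch of that pattern (the mask/data member names are inferred from the helper's parameters; the example entry values are illustrative only):

/* Hedged sketch of the table-driven PCH GPIO init; not verbatim code. */
static const struct init_nmi_sketch {
	unsigned int offset;		/* PCH PCR GPIO register offset */
	unsigned int mask;		/* bits to change */
	unsigned int data;		/* new value for those bits */
} init_nmi_sketch[] = {
	{ .offset = 0x84, .mask = 0x1, .data = 0x0 },	/* illustrative entry */
};

static void apply_init_table_sketch(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(init_nmi_sketch); i++)
		uv_init_hubless_pch_io(init_nmi_sketch[i].offset,
				       init_nmi_sketch[i].mask,
				       init_nmi_sketch[i].data);
}
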
469 hub_nmi->nmi_value = status; in uv_nmi_test_hubless()
470 atomic_inc(&hub_nmi->read_mmr_count); in uv_nmi_test_hubless()
483 if (hub_nmi->hub_present) in uv_test_nmi()
486 if (hub_nmi->pch_owner) /* Only PCH owner can check status */ in uv_test_nmi()
489 return -1; in uv_test_nmi()
493 * If this is the first CPU in on this hub, set the hub_nmi "in_nmi" and "owner" values and
494 * return true. If it is the first CPU in on the system, also set the global "in_nmi" flag.
496 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi) in uv_set_in_nmi() argument
498 int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1); in uv_set_in_nmi()
501 atomic_set(&hub_nmi->cpu_owner, cpu); in uv_set_in_nmi()
503 atomic_set(&uv_nmi_cpu, cpu); in uv_set_in_nmi()
505 atomic_inc(&hub_nmi->nmi_count); in uv_set_in_nmi()
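
Putting the uv_set_in_nmi() fragments together: the hub-level in_nmi flag is claimed with atomic_add_unless(), so only the first CPU on the hub records itself as cpu_owner, and the first CPU system-wide additionally records itself in uv_nmi_cpu. A hedged reconstruction (the system-wide flag name uv_in_nmi is assumed):

/* Hedged reconstruction of uv_set_in_nmi() from the fragments above. */
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		if (atomic_add_unless(&uv_in_nmi, 1, 1))	/* first in the system? */
			atomic_set(&uv_nmi_cpu, cpu);

		atomic_inc(&hub_nmi->nmi_count);
	}
	return first;
}
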
513 int cpu = smp_processor_id(); in uv_check_nmi() local
521 nmi = atomic_read(&hub_nmi->in_nmi); in uv_check_nmi()
525 if (raw_spin_trylock(&hub_nmi->nmi_lock)) { in uv_check_nmi()
530 uv_set_in_nmi(cpu, hub_nmi); in uv_check_nmi()
535 /* A non-PCH node in a hubless system waits for NMI */ in uv_check_nmi()
540 raw_spin_unlock(&hub_nmi->nmi_lock); in uv_check_nmi()
548 /* Re-check hub in_nmi flag */ in uv_check_nmi()
549 nmi = atomic_read(&hub_nmi->in_nmi); in uv_check_nmi()
561 uv_set_in_nmi(cpu, hub_nmi); in uv_check_nmi()
566 raw_spin_unlock(&hub_nmi->nmi_lock); in uv_check_nmi()
577 static inline void uv_clear_nmi(int cpu) in uv_clear_nmi() argument
581 if (cpu == atomic_read(&hub_nmi->cpu_owner)) { in uv_clear_nmi()
582 atomic_set(&hub_nmi->cpu_owner, -1); in uv_clear_nmi()
583 atomic_set(&hub_nmi->in_nmi, 0); in uv_clear_nmi()
584 if (hub_nmi->hub_present) in uv_clear_nmi()
588 raw_spin_unlock(&hub_nmi->nmi_lock); in uv_clear_nmi()
592 /* Ping non-responding CPUs, attempting to force them into the NMI handler */
595 int cpu; in uv_nmi_nr_cpus_ping() local
597 for_each_cpu(cpu, uv_nmi_cpu_mask) in uv_nmi_nr_cpus_ping()
598 uv_cpu_nmi_per(cpu).pinging = 1; in uv_nmi_nr_cpus_ping()
600 apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI); in uv_nmi_nr_cpus_ping()
603 /* Clean up flags for CPUs that ignored both the NMI and the ping */
606 int cpu; in uv_nmi_cleanup_mask() local
608 for_each_cpu(cpu, uv_nmi_cpu_mask) { in uv_nmi_cleanup_mask()
609 uv_cpu_nmi_per(cpu).pinging = 0; in uv_nmi_cleanup_mask()
610 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT; in uv_nmi_cleanup_mask()
611 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); in uv_nmi_cleanup_mask()
615 /* Loop waiting as CPUs enter the NMI handler */
620 int cpu = smp_processor_id(); in uv_nmi_wait_cpus() local
626 k = n - cpumask_weight(uv_nmi_cpu_mask); in uv_nmi_wait_cpus()
629 /* PCH NMI causes only one CPU to respond */ in uv_nmi_wait_cpus()
631 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); in uv_nmi_wait_cpus()
632 return n - k - 1; in uv_nmi_wait_cpus()
650 if (last_k != k) { /* abort if no new CPUs are coming in */ in uv_nmi_wait_cpus()
656 /* Extend delay if waiting only for CPU 0: */ in uv_nmi_wait_cpus()
657 if (waiting && (n - k) == 1 && in uv_nmi_wait_cpus()
664 return n - k; in uv_nmi_wait_cpus()
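
uv_nmi_wait_cpus() keeps recomputing how many CPUs are still missing from uv_nmi_cpu_mask, stops waiting once no new CPUs arrive within the timeout, and returns the number that never showed up. A hedged sketch of that wait loop (the delay and timeout handling are simplified assumptions):

/* Hedged sketch of the arrival-wait loop; not the file's verbatim code. */
static int wait_cpus_sketch(int n, int timeout_us)
{
	int k = n - cpumask_weight(uv_nmi_cpu_mask);	/* CPUs already in */
	int last_k = k, waited = 0;

	while (k < n) {
		udelay(100);
		waited += 100;
		k = n - cpumask_weight(uv_nmi_cpu_mask);
		if (k != last_k) {		/* progress: restart the clock */
			last_k = k;
			waited = 0;
		} else if (waited >= timeout_us) {
			break;			/* no new CPUs are coming in */
		}
	}
	return n - k;				/* CPUs that never arrived */
}
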
667 /* Wait until all slave CPUs have entered the UV NMI handler */
670 /* Indicate this CPU is in: */ in uv_nmi_wait()
673 /* If not the first CPU in (the master), then we are a slave CPU */ in uv_nmi_wait()
678 /* Wait for all other CPUs to gather here */ in uv_nmi_wait()
689 /* If all CPUs are in, then done */ in uv_nmi_wait()
705 pr_info("\nUV: %4s %6s %-32s %s (Note: PID 0 not listed)\n", in uv_nmi_dump_cpu_ip_hdr()
706 "CPU", "PID", "COMMAND", "IP"); in uv_nmi_dump_cpu_ip_hdr()
710 static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs) in uv_nmi_dump_cpu_ip() argument
712 pr_info("UV: %4d %6d %-32.32s %pS", in uv_nmi_dump_cpu_ip()
713 cpu, current->pid, current->comm, (void *)regs->ip); in uv_nmi_dump_cpu_ip()
717 * Dump this CPU's state. If action was set to "kdump" and the crash_kexec
720 * action "ips" only displays instruction pointers for the non-idle CPUs.
723 static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs) in uv_nmi_dump_state_cpu() argument
727 if (cpu == 0) in uv_nmi_dump_state_cpu()
730 if (current->pid != 0 || !uv_nmi_action_is("ips")) in uv_nmi_dump_state_cpu()
731 uv_nmi_dump_cpu_ip(cpu, regs); in uv_nmi_dump_state_cpu()
734 pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu); in uv_nmi_dump_state_cpu()
741 /* Trigger a slave CPU to dump its state */
742 static void uv_nmi_trigger_dump(int cpu) in uv_nmi_trigger_dump() argument
746 if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN) in uv_nmi_trigger_dump()
749 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP; in uv_nmi_trigger_dump()
753 if (uv_cpu_nmi_per(cpu).state in uv_nmi_trigger_dump()
756 } while (--retry > 0); in uv_nmi_trigger_dump()
758 pr_crit("UV: CPU %d stuck in process dump function\n", cpu); in uv_nmi_trigger_dump()
759 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE; in uv_nmi_trigger_dump()
762 /* Wait until all CPUs are ready to exit */
776 /* The current "health" check simply reports which CPUs are responsive */
777 static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master) in uv_nmi_action_health() argument
781 int out = num_online_cpus() - in; in uv_nmi_action_health()
783 pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out); in uv_nmi_action_health()
792 /* Walk through CPU list and dump state of each */
793 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master) in uv_nmi_dump_state() argument
800 pr_alert("UV: tracing %s for %d CPUs from CPU %d\n", in uv_nmi_dump_state()
802 atomic_read(&uv_nmi_cpus_in_nmi), cpu); in uv_nmi_dump_state()
809 else if (tcpu == cpu) in uv_nmi_dump_state()
824 uv_nmi_dump_state_cpu(cpu, regs); in uv_nmi_dump_state()
840 static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) in uv_nmi_kdump() argument
844 pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu); in uv_nmi_kdump()
862 static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) in uv_nmi_kdump() argument
884 return -1; in uv_nmi_kdb_reason()
895 static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master) in uv_call_kgdb_kdb() argument
905 ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason, in uv_call_kgdb_kdb()
922 kgdb_nmicallback(cpu, regs); in uv_call_kgdb_kdb()
928 static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master) in uv_call_kgdb_kdb() argument
940 int cpu = smp_processor_id(); in uv_handle_nmi() local
952 /* Check whether we are the first CPU into the NMI handler (the master) */ in uv_handle_nmi()
953 master = (atomic_read(&uv_nmi_cpu) == cpu); in uv_handle_nmi()
957 uv_nmi_kdump(cpu, master, regs); in uv_handle_nmi()
964 /* Pause as all CPUs enter the NMI handler */ in uv_handle_nmi()
969 uv_nmi_action_health(cpu, regs, master); in uv_handle_nmi()
971 uv_nmi_dump_state(cpu, regs, master); in uv_handle_nmi()
973 uv_call_kgdb_kdb(cpu, regs, master); in uv_handle_nmi()
984 uv_clear_nmi(cpu); in uv_handle_nmi()
990 atomic_set(&uv_nmi_cpus_in_nmi, -1); in uv_handle_nmi()
991 atomic_set(&uv_nmi_cpu, -1); in uv_handle_nmi()
1004 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
1037 * Unmask NMI on all CPUs in uv_nmi_init()
1048 int cpu; in uv_nmi_setup_common() local
1054 for_each_present_cpu(cpu) { in uv_nmi_setup_common()
1055 int nid = cpu_to_node(cpu); in uv_nmi_setup_common()
1060 raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock)); in uv_nmi_setup_common()
1061 atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1); in uv_nmi_setup_common()
1062 uv_hub_nmi_list[nid]->hub_present = hubbed; in uv_nmi_setup_common()
1063 uv_hub_nmi_list[nid]->pch_owner = (nid == 0); in uv_nmi_setup_common()
1065 uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid]; in uv_nmi_setup_common()
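
The loop above points every CPU's per-CPU reference at a single per-node uv_hub_nmi_s structure; the allocation of that structure is not among the matched lines. A hedged sketch of the usual per-node allocation step (the size and flag details are assumptions):

/* Hedged sketch: allocate one uv_hub_nmi_s per node on first use. */
if (uv_hub_nmi_list[nid] == NULL) {
	uv_hub_nmi_list[nid] = kzalloc_node(sizeof(struct uv_hub_nmi_s),
					    GFP_KERNEL, nid);
	if (uv_hub_nmi_list[nid] == NULL)
		return -ENOMEM;
}
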