Lines Matching full:nmi

18 #include <linux/nmi.h>
31 #include <asm/nmi.h>
40 #include <trace/events/nmi.h>
90 * Prevent the NMI reason port (0x61) from being accessed simultaneously; can
91 * only be used from the NMI handler.
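The lock this comment refers to is the raw spinlock that serializes every CPU's access to the legacy reason port. A minimal sketch of the pattern, assuming the nmi_reason_lock name used in this file and the x86_platform.get_nmi_reason() hook that reads port 0x61; the wrapper function is purely illustrative (in the tree this logic sits inside default_do_nmi() and is more defensive around crash/panic paths):

#include <linux/spinlock.h>
#include <asm/x86_init.h>        /* x86_platform.get_nmi_reason() */

static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

/* Illustrative wrapper; in the tree this runs inside default_do_nmi(). */
static unsigned char read_nmi_reason_serialized(void)
{
        unsigned char reason;

        /*
         * Port 0x61 is a single system-wide resource, so two CPUs must not
         * read/clear it at the same time.  Taking a raw spinlock here is
         * only safe because we are already running in NMI context.
         */
        raw_spin_lock(&nmi_reason_lock);
        reason = x86_platform.get_nmi_reason();
        raw_spin_unlock(&nmi_reason_lock);

        return reason;
}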
127 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", in nmi_check_duration()
160 /* return total number of NMI events handled */ in nmi_handle()
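nmi_handle() walks the list of actions registered for one NMI type and sums what each handler returns, which is how callers later distinguish "claimed" from "unknown" NMIs. A simplified sketch; the descriptor internals (nmi_to_desc(), desc->head) are assumptions about this file's private data structures:

static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
        struct nmi_desc *desc = nmi_to_desc(type);        /* per-type list head */
        struct nmiaction *a;
        int handled = 0;

        rcu_read_lock();

        /* Every handler registered for this type gets a look at the NMI. */
        list_for_each_entry_rcu(a, &desc->head, list) {
                u64 start = local_clock();

                handled += a->handler(type, regs);
                nmi_check_duration(a, local_clock() - start);
        }

        rcu_read_unlock();

        /* return total number of NMI events handled */
        return handled;
}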
177 * internal NMI handler call chains (SERR and IO_CHECK). in __register_nmi_handler()
206 * the name passed in to describe the nmi handler in unregister_nmi_handler()
211 "Trying to free NMI (%s) from NMI context!\n", n->name); in unregister_nmi_handler()
233 pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", in pci_serr_error()
237 nmi_panic(regs, "NMI: Not continuing"); in pci_serr_error()
257 "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n", in io_check_error()
262 nmi_panic(regs, "NMI IOCK error: Not continuing"); in io_check_error()
265 * If we end up here, it means we have received an NMI while in io_check_error()
296 * if it caused the NMI) in unknown_nmi_error()
306 pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", in unknown_nmi_error()
310 nmi_panic(regs, "NMI: Not continuing"); in unknown_nmi_error()
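Lines 233-310 come from the three legacy reason-port paths: PCI SERR#, IOCHK and the unknown-NMI fallback. The reason byte read from port 0x61 selects among them roughly as below; the helper is illustrative (in the file this dispatch is part of default_do_nmi()) and the bit masks are those from <asm/mach_traps.h>:

#include <asm/mach_traps.h>        /* NMI_REASON_SERR, NMI_REASON_IOCHK */

/* Illustrative-only dispatch on the port-0x61 reason bits. */
static void dispatch_reason_nmi(unsigned char reason, struct pt_regs *regs)
{
        if (reason & NMI_REASON_SERR)
                pci_serr_error(reason, regs);        /* PCI SERR# asserted */
        else if (reason & NMI_REASON_IOCHK)
                io_check_error(reason, regs);        /* IOCHK / debug switch */
        else
                unknown_nmi_error(reason, regs);     /* nobody claimed it */
}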
326 * CPU-specific NMI must be processed before non-CPU-specific in default_do_nmi()
327 * NMI, otherwise we may lose it, because the CPU-specific in default_do_nmi()
328 * NMI cannot be detected/processed on other CPUs. in default_do_nmi()
333 * be two NMIs or more than two NMIs (anything over two is dropped in default_do_nmi()
334 * due to NMI being edge-triggered). If this is the second half in default_do_nmi()
335 * of the back-to-back NMI, assume we dropped things and process in default_do_nmi()
336 * more handlers. Otherwise reset the 'swallow' NMI behaviour in default_do_nmi()
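Back-to-back NMIs are detected by remembering where the previous NMI interrupted: the second half of a pair hits the exact same instruction pointer. A sketch using the per-CPU variables kept for this purpose; the helper name is invented and the variable names should be treated as assumptions:

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

/* Invented helper; in the file this sits at the top of default_do_nmi(). */
static bool nmi_is_back_to_back(struct pt_regs *regs)
{
        bool b2b = false;

        if (regs->ip == __this_cpu_read(last_nmi_rip))
                b2b = true;                             /* second half of a pair */
        else
                __this_cpu_write(swallow_nmi, false);   /* new context: reset */

        __this_cpu_write(last_nmi_rip, regs->ip);
        return b2b;
}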
354 * There are cases when an NMI handler handles multiple in default_do_nmi()
355 * events in the current NMI. One of these events may in default_do_nmi()
356 * be queued up for the next NMI. Because the event is in default_do_nmi()
357 * already handled, the next NMI will result in an unknown in default_do_nmi()
358 * NMI. Instead let's flag this for a potential NMI to in default_do_nmi()
367 * Non-CPU-specific NMI: NMI sources can be processed on any CPU. in default_do_nmi()
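Tying the last two comments together: CPU-local handlers run first, and if a single NMI handled more than one event, the surplus event may already be latched, so the 'swallow' flag is armed for the next NMI. A continuation of the sketch above, with the same caveats and an invented helper name:

/* Invented helper: returns true when a CPU-local handler claimed the NMI. */
static bool nmi_run_local_handlers(struct pt_regs *regs)
{
        int handled = nmi_handle(NMI_LOCAL, regs);

        if (!handled)
                return false;   /* fall through to the shared reason-port sources */

        /*
         * More than one event in a single NMI: the surplus event may already
         * be latched and would show up next time as an "unknown" NMI, so
         * remember that it is safe to swallow that one.
         */
        if (handled > 1)
                __this_cpu_write(swallow_nmi, true);

        return true;
}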
388 * Reassert NMI in case it became active in default_do_nmi()
400 * Only one NMI can be latched at a time. To handle in default_do_nmi()
401 * this we may process multiple NMI handlers at once to in default_do_nmi()
402 * cover the case where an NMI is dropped. The downside in default_do_nmi()
403 * to this approach is we may process an NMI prematurely, in default_do_nmi()
404 * while its real NMI is sitting latched. This will cause in default_do_nmi()
405 * an unknown NMI on the next run of the NMI processing. in default_do_nmi()
410 * of a back-to-back NMI, so we flag that condition too. in default_do_nmi()
413 * NMI previously and we swallow it. Otherwise we reset in default_do_nmi()
417 * a 'real' unknown NMI. For example, while processing in default_do_nmi()
418 * a perf NMI another perf NMI comes in along with a in default_do_nmi()
419 * 'real' unknown NMI. These two NMIs get combined into in default_do_nmi()
420 * one (as described above). When the next NMI gets in default_do_nmi()
422 * no one will know that there was a 'real' unknown NMI sent in default_do_nmi()
424 * perf NMI returns two events handled then the second in default_do_nmi()
425 * NMI will get eaten by the logic below, again losing a in default_do_nmi()
426 * 'real' unknown NMI. But this is the best we can do in default_do_nmi()
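All of the above boils down to a two-line decision for an NMI nobody claimed: swallow it only when it is both the second half of a back-to-back pair and the 'swallow' flag was armed, otherwise report it as unknown. Sketch (the per-CPU statistics counter is an assumption):

/* Invented helper showing the final decision for an unclaimed NMI. */
static void nmi_unclaimed(bool b2b, unsigned char reason, struct pt_regs *regs)
{
        if (b2b && __this_cpu_read(swallow_nmi))
                __this_cpu_add(nmi_stats.swallow, 1);   /* assume a duplicate, drop it */
        else
                unknown_nmi_error(reason, regs);        /* "Uhhuh. NMI received ..." */
}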
440 * its NMI context with the CPU when the breakpoint or page fault does an IRET.
443 * NMI processing. On x86_64, the asm glue protects us from nested NMIs
444 * if the outer NMI came from kernel mode, but we can still nest if the
445 * outer NMI came from user mode.
453 * When no NMI is in progress, it is in the "not running" state.
454 * When an NMI comes in, it goes into the "executing" state.
455 * Normally, if another NMI is triggered, it does not interrupt
456 * the running NMI and the HW will simply latch it so that when
457 * the first NMI finishes, it will restart the second NMI.
459 * when one is running, are ignored. Only one NMI is restarted.)
461 * If an NMI executes an iret, another NMI can preempt it. We do not
462 * want to allow this new NMI to run, but we want to execute it when the
464 * the first NMI will perform a dec_return; if the result is zero
465 * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
468 * rerun the NMI handler again, and restart the 'latched' NMI.
475 * In case the NMI takes a page fault, we need to save off the CR2
476 * because the NMI could have preempted another page fault and corrupt
479 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
480 * Otherwise, there would be a race of another nested NMI coming in
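The comment block above (source lines 440-480) describes a per-CPU three-state machine plus a CR2 save/restore. A condensed sketch of the corresponding entry/exit flow; this simplifies the real exc_nmi() control flow, so treat the wrapper function and its details as illustrative:

enum nmi_states {
        NMI_NOT_RUNNING = 0,
        NMI_EXECUTING,
        NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);

/* Condensed, illustrative version of the entry/exit flow described above. */
static void nmi_entry_sketch(struct pt_regs *regs)
{
        if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
                /* Nested NMI (the outer one did an IRET): just latch it. */
                this_cpu_write(nmi_state, NMI_LATCHED);
                return;
        }
        this_cpu_write(nmi_state, NMI_EXECUTING);
        this_cpu_write(nmi_cr2, read_cr2());        /* save before any page fault */

nmi_restart:
        /* ... run default_do_nmi() and the registered handlers ... */

        /* Restore CR2 before the state can drop back to NOT_RUNNING. */
        if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
                write_cr2(this_cpu_read(nmi_cr2));

        /*
         * dec_return: LATCHED -> EXECUTING means another NMI arrived while
         * we ran, so replay it; EXECUTING -> NOT_RUNNING means we are done.
         */
        if (this_cpu_dec_return(nmi_state))
                goto nmi_restart;
}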
582 /* +--------- nsp->idt_seq_snap & 0x1: CPU is in NMI handler. */
585 /* | | | NMI handler has been invoked. */
629 msgp = "CPU entered NMI handler function, but has not exited"; in nmi_backtrace_stall_check()
640 msghp = " (CPU currently in NMI handler function)"; in nmi_backtrace_stall_check()
642 msghp = " (CPU exited one NMI handler function)"; in nmi_backtrace_stall_check()
661 /* reset the back-to-back NMI logic */
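Resetting the back-to-back logic simply means forgetting the saved instruction pointer, so the next NMI cannot be mistaken for the second half of a pair. In the kernel this is what local_touch_nmi() is for; a sketch under that assumption:

/* Forget the last interrupted %rip so the next NMI starts with a clean slate. */
void local_touch_nmi(void)
{
        __this_cpu_write(last_nmi_rip, 0);
}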