Lines matching "wakeup", "event" and "action" in ec.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * ec.c - ACPI Embedded Controller Driver (v3)
5 * Copyright (C) 2001-2015 Intel Corporation
43 #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
53 * host should re-check SCI_EVT after the first time the SCI_EVT
56 * at any later time could indicate another event. Normally such
57 * kind of EC firmware has implemented an event queue and will
58 * return 0x00 to indicate "no outstanding event".
61 * event value in the data register (EC_DATA), the target can safely
63 * event is being handled by the host. The host then should check
64 * SCI_EVT right after reading the event response from the data
66 * EVENT: After seeing the event response read from the data register
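The comment fragment above distinguishes firmware whose SCI_EVT clears on the QUERY write from firmware that only clears it after the event value has been read from EC_DATA, and notes that queue-based firmware returns 0x00 to mean "no outstanding event". The following stand-alone user-space sketch (hypothetical ec_read_status()/ec_query() helpers, not the kernel's API) models the resulting host-side loop: keep querying while SCI_EVT stays set and stop when the queue reports 0x00.

        /*
         * User-space model of the host-side query loop described above.
         * The helpers are stand-ins for real EC register accesses.
         */
        #include <stdio.h>
        #include <stdint.h>

        #define SCI_EVT 0x20                            /* mirrors ACPI_EC_FLAG_SCI */

        static uint8_t fake_queue[] = { 0x50, 0x66, 0x00 };  /* 0x00 ends the queue */
        static unsigned int qi;

        static uint8_t ec_read_status(void)             /* hypothetical stand-in */
        {
                return fake_queue[qi] ? SCI_EVT : 0;
        }

        static uint8_t ec_query(void)                   /* hypothetical stand-in */
        {
                return fake_queue[qi++];
        }

        int main(void)
        {
                while (ec_read_status() & SCI_EVT) {
                        uint8_t event = ec_query();

                        if (!event)                     /* queue drained */
                                break;
                        printf("would evaluate _Q%02X\n", event);
                }
                return 0;
        }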
97 EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */
138 MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
142 MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
188 /* --------------------------------------------------------------------------
190 * -------------------------------------------------------------------------- */
230 ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
232 /* --------------------------------------------------------------------------
234 * -------------------------------------------------------------------------- */
238 return test_bit(EC_FLAGS_STARTED, &ec->flags) && in acpi_ec_started()
239 !test_bit(EC_FLAGS_STOPPED, &ec->flags); in acpi_ec_started()
246 * (boot/resume), OSPMs shouldn't enable the event handling, only in acpi_ec_event_enabled()
249 if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) in acpi_ec_event_enabled()
252 * However, disabling the event handling is experimental for late in acpi_ec_event_enabled()
255 * 1. true: The EC event handling is disabled before entering in acpi_ec_event_enabled()
257 * 2. false: The EC event handling is automatically disabled as in acpi_ec_event_enabled()
263 return test_bit(EC_FLAGS_STARTED, &ec->flags); in acpi_ec_event_enabled()
268 return ec->reference_count == 1; in acpi_ec_flushed()
271 /* --------------------------------------------------------------------------
273 * -------------------------------------------------------------------------- */
277 u8 x = inb(ec->command_addr); in acpi_ec_read_status()
292 u8 x = inb(ec->data_addr); in acpi_ec_read_data()
294 ec->timestamp = jiffies; in acpi_ec_read_data()
302 outb(command, ec->command_addr); in acpi_ec_write_cmd()
303 ec->timestamp = jiffies; in acpi_ec_write_cmd()
309 outb(data, ec->data_addr); in acpi_ec_write_data()
310 ec->timestamp = jiffies; in acpi_ec_write_data()
334 /* --------------------------------------------------------------------------
336 * -------------------------------------------------------------------------- */
342 (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status); in acpi_ec_is_gpe_raised()
349 acpi_enable_gpe(NULL, ec->gpe); in acpi_ec_enable_gpe()
351 BUG_ON(ec->reference_count < 1); in acpi_ec_enable_gpe()
352 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); in acpi_ec_enable_gpe()
357 * software need to manually trigger a pseudo GPE event on in acpi_ec_enable_gpe()
368 acpi_disable_gpe(NULL, ec->gpe); in acpi_ec_disable_gpe()
370 BUG_ON(ec->reference_count < 1); in acpi_ec_disable_gpe()
371 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); in acpi_ec_disable_gpe()
389 acpi_clear_gpe(NULL, ec->gpe); in acpi_ec_clear_gpe()
392 /* --------------------------------------------------------------------------
394 * -------------------------------------------------------------------------- */
398 ec->reference_count++; in acpi_ec_submit_request()
399 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) && in acpi_ec_submit_request()
400 ec->gpe >= 0 && ec->reference_count == 1) in acpi_ec_submit_request()
408 ec->reference_count--; in acpi_ec_complete_request()
409 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) && in acpi_ec_complete_request()
410 ec->gpe >= 0 && ec->reference_count == 0) in acpi_ec_complete_request()
414 wake_up(&ec->wait); in acpi_ec_complete_request()
419 if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) { in acpi_ec_mask_events()
420 if (ec->gpe >= 0) in acpi_ec_mask_events()
423 disable_irq_nosync(ec->irq); in acpi_ec_mask_events()
426 set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags); in acpi_ec_mask_events()
432 if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) { in acpi_ec_unmask_events()
433 clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags); in acpi_ec_unmask_events()
434 if (ec->gpe >= 0) in acpi_ec_unmask_events()
437 enable_irq(ec->irq); in acpi_ec_unmask_events()
444 * acpi_ec_submit_flushable_request() - Increase the reference count unless
449 * This function must be used before taking a new action that should hold
450 * the reference count. If this function returns false, then the action
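acpi_ec_submit_flushable_request() is documented above as the gate for any action that must hold a reference on the EC: if it returns false, the action must not be taken. A minimal user-space model of that reference-count/flush pattern (all names local to this sketch, not the kernel's):

        #include <stdbool.h>
        #include <stdio.h>

        struct fake_ec {
                bool started;
                int reference_count;            /* 1 == started and idle */
        };

        static bool submit_flushable_request(struct fake_ec *ec)
        {
                if (!ec->started)
                        return false;           /* caller must not take the action */
                ec->reference_count++;
                return true;
        }

        static void complete_request(struct fake_ec *ec)
        {
                ec->reference_count--;
                if (ec->reference_count == 1)
                        printf("flushed: only the START reference remains\n");
        }

        int main(void)
        {
                struct fake_ec ec = { .started = true, .reference_count = 1 };

                if (submit_flushable_request(&ec)) {
                        /* ... perform the transaction ... */
                        complete_request(&ec);
                }
                return 0;
        }

This mirrors acpi_ec_flushed() at line 268, which treats reference_count == 1 (only the START reference left) as fully flushed.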
467 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { in acpi_ec_submit_query()
470 ec->nr_pending_queries++; in acpi_ec_submit_query()
471 queue_work(ec_wq, &ec->work); in acpi_ec_submit_query()
477 if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) in acpi_ec_complete_query()
485 if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) in __acpi_ec_enable_event()
486 ec_log_drv("event unblocked"); in __acpi_ec_enable_event()
488 * Unconditionally invoke this once after enabling the event in __acpi_ec_enable_event()
496 if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) in __acpi_ec_disable_event()
497 ec_log_drv("event blocked"); in __acpi_ec_disable_event()
524 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_enable_event()
527 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_enable_event()
537 drain_workqueue(ec_wq); /* flush ec->work */ in __acpi_ec_flush_work()
545 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_disable_event()
547 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_disable_event()
571 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_guard_event()
573 * If firmware SCI_EVT clearing timing is "event", we actually in acpi_ec_guard_event()
575 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an in acpi_ec_guard_event()
588 !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) || in acpi_ec_guard_event()
589 (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY)) in acpi_ec_guard_event()
591 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_guard_event()
600 spin_lock_irqsave(&ec->lock, flags); in ec_transaction_polled()
601 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL)) in ec_transaction_polled()
603 spin_unlock_irqrestore(&ec->lock, flags); in ec_transaction_polled()
612 spin_lock_irqsave(&ec->lock, flags); in ec_transaction_completed()
613 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) in ec_transaction_completed()
615 spin_unlock_irqrestore(&ec->lock, flags); in ec_transaction_completed()
621 ec->curr->flags |= flag; in ec_transaction_transition()
622 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { in ec_transaction_transition()
631 set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags); in ec_transaction_transition()
639 bool wakeup = false; in advance_transaction()

645 * ensure a hardware STS 0->1 change after this clearing can always in advance_transaction()
648 if (ec->gpe >= 0) in advance_transaction()
652 t = ec->curr; in advance_transaction()
657 if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) { in advance_transaction()
659 (!ec->nr_pending_queries || in advance_transaction()
660 test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) { in advance_transaction()
661 clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags); in advance_transaction()
667 if (t->flags & ACPI_EC_COMMAND_POLL) { in advance_transaction()
668 if (t->wlen > t->wi) { in advance_transaction()
670 acpi_ec_write_data(ec, t->wdata[t->wi++]); in advance_transaction()
673 } else if (t->rlen > t->ri) { in advance_transaction()
675 t->rdata[t->ri++] = acpi_ec_read_data(ec); in advance_transaction()
676 if (t->rlen == t->ri) { in advance_transaction()
678 if (t->command == ACPI_EC_COMMAND_QUERY) in advance_transaction()
681 wakeup = true; in advance_transaction()
685 } else if (t->wlen == t->wi && in advance_transaction()
688 wakeup = true; in advance_transaction()
692 acpi_ec_write_cmd(ec, t->command); in advance_transaction()
703 if (t->irq_count < ec_storm_threshold) in advance_transaction()
704 ++t->irq_count; in advance_transaction()
706 if (t->irq_count == ec_storm_threshold) in advance_transaction()
713 if (wakeup && in_interrupt()) in advance_transaction()
714 wake_up(&ec->wait); in advance_transaction()
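The irq_count/ec_storm_threshold lines above implement GPE-storm detection: interrupts are counted per transaction, and once the count reaches the threshold the driver masks EC events and finishes the transaction in polling mode (see the matching check at line 804 in acpi_ec_transaction_unlocked()). A tiny self-contained model of that accounting, assuming the driver's default threshold of 8:

        #include <stdbool.h>
        #include <stdio.h>

        #define STORM_THRESHOLD 8               /* mirrors the ec_storm_threshold default */

        int main(void)
        {
                unsigned int irq_count = 0;
                bool polling = false;

                for (int irq = 0; irq < 12; irq++) {    /* simulate a noisy EC */
                        if (irq_count < STORM_THRESHOLD)
                                ++irq_count;
                        if (irq_count == STORM_THRESHOLD && !polling) {
                                polling = true;
                                printf("storm detected after %u IRQs, mask and poll\n",
                                       irq_count);
                        }
                }
                return 0;
        }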
719 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; in start_transaction()
720 ec->curr->flags = 0; in start_transaction()
725 unsigned long guard = usecs_to_jiffies(ec->polling_guard); in ec_guard()
726 unsigned long timeout = ec->timestamp + guard; in ec_guard()
730 if (ec->busy_polling) { in ec_guard()
742 * for event clearing mode "event" before the in ec_guard()
749 if (wait_event_timeout(ec->wait, in ec_guard()
755 return -ETIME; in ec_guard()
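ec_guard() enforces a quiet period after each EC register access: ec->timestamp records the last access and ec->polling_guard (microseconds) sets how long to hold off, either by busy-polling or by sleeping on ec->wait, returning -ETIME if the transaction does not complete in time. A user-space sketch of just the guard-interval arithmetic, with clock_gettime() standing in for jiffies:

        #include <stdio.h>
        #include <time.h>
        #include <unistd.h>

        static long long now_us(void)
        {
                struct timespec ts;

                clock_gettime(CLOCK_MONOTONIC, &ts);
                return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
        }

        int main(void)
        {
                long long timestamp = now_us();         /* models ec->timestamp */
                long long guard_us = 1000;              /* models ec->polling_guard */
                long long deadline = timestamp + guard_us;
                long long remaining = deadline - now_us();

                if (remaining > 0)
                        usleep(remaining);              /* wait before re-polling */
                printf("guard interval elapsed, safe to poll the EC again\n");
                return 0;
        }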
763 while (repeat--) { in ec_poll()
769 spin_lock_irqsave(&ec->lock, flags); in ec_poll()
771 spin_unlock_irqrestore(&ec->lock, flags); in ec_poll()
774 spin_lock_irqsave(&ec->lock, flags); in ec_poll()
776 spin_unlock_irqrestore(&ec->lock, flags); in ec_poll()
778 return -ETIME; in ec_poll()
788 spin_lock_irqsave(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
791 ret = -EINVAL; in acpi_ec_transaction_unlocked()
796 ec->curr = t; in acpi_ec_transaction_unlocked()
797 ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command)); in acpi_ec_transaction_unlocked()
799 spin_unlock_irqrestore(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
803 spin_lock_irqsave(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
804 if (t->irq_count == ec_storm_threshold) in acpi_ec_transaction_unlocked()
806 ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command)); in acpi_ec_transaction_unlocked()
807 ec->curr = NULL; in acpi_ec_transaction_unlocked()
812 spin_unlock_irqrestore(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
821 if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) in acpi_ec_transaction()
822 return -EINVAL; in acpi_ec_transaction()
823 if (t->rdata) in acpi_ec_transaction()
824 memset(t->rdata, 0, t->rlen); in acpi_ec_transaction()
826 mutex_lock(&ec->mutex); in acpi_ec_transaction()
827 if (ec->global_lock) { in acpi_ec_transaction()
830 status = -ENODEV; in acpi_ec_transaction()
837 if (ec->global_lock) in acpi_ec_transaction()
840 mutex_unlock(&ec->mutex); in acpi_ec_transaction()
893 return -ENODEV; in ec_read()
910 return -ENODEV; in ec_write()
927 return -ENODEV; in ec_transaction()
938 return first_ec->handle; in ec_get_handle()
946 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_start()
947 if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) { in acpi_ec_start()
949 /* Enable GPE for event processing (SCI_EVT=1) */ in acpi_ec_start()
956 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_start()
964 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_stopped()
966 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_stopped()
974 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_stop()
977 set_bit(EC_FLAGS_STOPPED, &ec->flags); in acpi_ec_stop()
978 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_stop()
979 wait_event(ec->wait, acpi_ec_stopped(ec)); in acpi_ec_stop()
980 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_stop()
981 /* Disable GPE for event processing (SCI_EVT=1) */ in acpi_ec_stop()
987 clear_bit(EC_FLAGS_STARTED, &ec->flags); in acpi_ec_stop()
988 clear_bit(EC_FLAGS_STOPPED, &ec->flags); in acpi_ec_stop()
991 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_stop()
998 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_enter_noirq()
999 ec->busy_polling = true; in acpi_ec_enter_noirq()
1000 ec->polling_guard = 0; in acpi_ec_enter_noirq()
1002 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_enter_noirq()
1009 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_leave_noirq()
1010 ec->busy_polling = ec_busy_polling; in acpi_ec_leave_noirq()
1011 ec->polling_guard = ec_polling_guard; in acpi_ec_leave_noirq()
1013 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_leave_noirq()
1023 mutex_lock(&ec->mutex); in acpi_ec_block_transactions()
1026 mutex_unlock(&ec->mutex); in acpi_ec_block_transactions()
1033 * atomic context during wakeup, so we don't need to acquire the mutex). in acpi_ec_unblock_transactions()
1039 /* --------------------------------------------------------------------------
1040 Event Management
1041 -------------------------------------------------------------------------- */
1047 mutex_lock(&ec->mutex); in acpi_ec_get_query_handler_by_value()
1048 list_for_each_entry(handler, &ec->list, node) { in acpi_ec_get_query_handler_by_value()
1049 if (value == handler->query_bit) { in acpi_ec_get_query_handler_by_value()
1050 kref_get(&handler->kref); in acpi_ec_get_query_handler_by_value()
1051 mutex_unlock(&ec->mutex); in acpi_ec_get_query_handler_by_value()
1055 mutex_unlock(&ec->mutex); in acpi_ec_get_query_handler_by_value()
1069 kref_put(&handler->kref, acpi_ec_query_handler_release); in acpi_ec_put_query_handler()
1080 return -ENOMEM; in acpi_ec_add_query_handler()
1082 handler->query_bit = query_bit; in acpi_ec_add_query_handler()
1083 handler->handle = handle; in acpi_ec_add_query_handler()
1084 handler->func = func; in acpi_ec_add_query_handler()
1085 handler->data = data; in acpi_ec_add_query_handler()
1086 mutex_lock(&ec->mutex); in acpi_ec_add_query_handler()
1087 kref_init(&handler->kref); in acpi_ec_add_query_handler()
1088 list_add(&handler->node, &ec->list); in acpi_ec_add_query_handler()
1089 mutex_unlock(&ec->mutex); in acpi_ec_add_query_handler()
1100 mutex_lock(&ec->mutex); in acpi_ec_remove_query_handlers()
1101 list_for_each_entry_safe(handler, tmp, &ec->list, node) { in acpi_ec_remove_query_handlers()
1102 if (remove_all || query_bit == handler->query_bit) { in acpi_ec_remove_query_handlers()
1103 list_del_init(&handler->node); in acpi_ec_remove_query_handlers()
1104 list_add(&handler->node, &free_list); in acpi_ec_remove_query_handlers()
1107 mutex_unlock(&ec->mutex); in acpi_ec_remove_query_handlers()
1126 INIT_WORK(&q->work, acpi_ec_event_processor); in acpi_ec_create_query()
1127 t = &q->transaction; in acpi_ec_create_query()
1128 t->command = ACPI_EC_COMMAND_QUERY; in acpi_ec_create_query()
1129 t->rdata = pval; in acpi_ec_create_query()
1130 t->rlen = 1; in acpi_ec_create_query()
1137 if (q->handler) in acpi_ec_delete_query()
1138 acpi_ec_put_query_handler(q->handler); in acpi_ec_delete_query()
1146 struct acpi_ec_query_handler *handler = q->handler; in acpi_ec_event_processor()
1148 ec_dbg_evt("Query(0x%02x) started", handler->query_bit); in acpi_ec_event_processor()
1149 if (handler->func) in acpi_ec_event_processor()
1150 handler->func(handler->data); in acpi_ec_event_processor()
1151 else if (handler->handle) in acpi_ec_event_processor()
1152 acpi_evaluate_object(handler->handle, NULL, NULL, NULL); in acpi_ec_event_processor()
1153 ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); in acpi_ec_event_processor()
1165 return -ENOMEM; in acpi_ec_query()
1172 result = acpi_ec_transaction(ec, &q->transaction); in acpi_ec_query()
1174 result = -ENODATA; in acpi_ec_query()
1178 q->handler = acpi_ec_get_query_handler_by_value(ec, value); in acpi_ec_query()
1179 if (!q->handler) { in acpi_ec_query()
1180 result = -ENODATA; in acpi_ec_query()
1194 if (!queue_work(ec_query_wq, &q->work)) { in acpi_ec_query()
1196 result = -EBUSY; in acpi_ec_query()
1213 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_check_event()
1218 if (!ec->curr) in acpi_ec_check_event()
1220 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_check_event()
1230 ec_dbg_evt("Event started"); in acpi_ec_event_handler()
1232 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_event_handler()
1233 while (ec->nr_pending_queries) { in acpi_ec_event_handler()
1234 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_event_handler()
1236 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_event_handler()
1237 ec->nr_pending_queries--; in acpi_ec_event_handler()
1244 if (!ec->nr_pending_queries) { in acpi_ec_event_handler()
1250 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_event_handler()
1252 ec_dbg_evt("Event stopped"); in acpi_ec_event_handler()
1261 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_handle_interrupt()
1263 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_handle_interrupt()
1279 /* --------------------------------------------------------------------------
1281 * -------------------------------------------------------------------------- */
1298 if (ec->busy_polling || bits > 8) in acpi_ec_space_handler()
1306 if (ec->busy_polling || bits > 8) in acpi_ec_space_handler()
1310 case -EINVAL: in acpi_ec_space_handler()
1312 case -ENODEV: in acpi_ec_space_handler()
1314 case -ETIME: in acpi_ec_space_handler()
1321 /* --------------------------------------------------------------------------
1323 * -------------------------------------------------------------------------- */
1343 mutex_init(&ec->mutex); in acpi_ec_alloc()
1344 init_waitqueue_head(&ec->wait); in acpi_ec_alloc()
1345 INIT_LIST_HEAD(&ec->list); in acpi_ec_alloc()
1346 spin_lock_init(&ec->lock); in acpi_ec_alloc()
1347 INIT_WORK(&ec->work, acpi_ec_event_handler); in acpi_ec_alloc()
1348 ec->timestamp = jiffies; in acpi_ec_alloc()
1349 ec->busy_polling = true; in acpi_ec_alloc()
1350 ec->polling_guard = 0; in acpi_ec_alloc()
1351 ec->gpe = -1; in acpi_ec_alloc()
1352 ec->irq = -1; in acpi_ec_alloc()
1381 ec->command_addr = ec->data_addr = 0; in ec_parse_device()
1387 if (ec->data_addr == 0 || ec->command_addr == 0) in ec_parse_device()
1395 ec->gpe = boot_ec->gpe; in ec_parse_device()
1401 ec->gpe = tmp; in ec_parse_device()
1404 * Errors are non-fatal, allowing for ACPI Reduced Hardware in ec_parse_device()
1411 ec->global_lock = tmp; in ec_parse_device()
1412 ec->handle = handle; in ec_parse_device()
1420 status = acpi_install_gpe_raw_handler(NULL, ec->gpe, in install_gpe_event_handler()
1426 if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) in install_gpe_event_handler()
1434 return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED, in install_gpio_irq_event_handler()
1439 * ec_install_handlers - Install service callbacks and register query methods.
1445 * namespace and register them, and install an event (either GPE or GPIO IRQ)
1449 * -ENODEV if the address space handler cannot be installed, which means
1451 * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred,
1460 if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { in ec_install_handlers()
1462 status = acpi_install_address_space_handler(ec->handle, in ec_install_handlers()
1468 return -ENODEV; in ec_install_handlers()
1470 set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); in ec_install_handlers()
1476 if (ec->gpe < 0) { in ec_install_handlers()
1483 if (irq == -EPROBE_DEFER) in ec_install_handlers()
1484 return -EPROBE_DEFER; in ec_install_handlers()
1486 ec->irq = irq; in ec_install_handlers()
1489 if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) { in ec_install_handlers()
1491 acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1, in ec_install_handlers()
1494 set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags); in ec_install_handlers()
1496 if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { in ec_install_handlers()
1499 if (ec->gpe >= 0) in ec_install_handlers()
1501 else if (ec->irq >= 0) in ec_install_handlers()
1505 set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); in ec_install_handlers()
1509 * Failures to install an event handler are not fatal, because in ec_install_handlers()
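ec_install_handlers(), per its kernel-doc above, returns 0 on success, -ENODEV when the address space handler cannot be installed, and -EPROBE_DEFER when the GPIO IRQ is not yet available. The sketch below (illustrative names, not the driver's actual call site; EPROBE_DEFER is redefined locally because it is a kernel-internal errno) shows the usual way a probe path propagates that deferral so the driver core retries later:

        #include <stdio.h>

        #define EPROBE_DEFER 517        /* kernel-internal errno, copied only for this model */

        static int fake_install_handlers(void)
        {
                return -EPROBE_DEFER;   /* pretend the GPIO IRQ is not ready yet */
        }

        static int fake_probe(void)
        {
                int ret = fake_install_handlers();

                if (ret == -EPROBE_DEFER)
                        return ret;     /* let the driver core retry this probe later */
                if (ret)
                        fprintf(stderr, "handler installation failed: %d\n", ret);
                return ret;
        }

        int main(void)
        {
                printf("probe returned %d\n", fake_probe());
                return 0;
        }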
1521 if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { in ec_remove_handlers()
1522 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, in ec_remove_handlers()
1525 clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); in ec_remove_handlers()
1541 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { in ec_remove_handlers()
1542 if (ec->gpe >= 0 && in ec_remove_handlers()
1543 ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, in ec_remove_handlers()
1547 if (ec->irq >= 0) in ec_remove_handlers()
1548 free_irq(ec->irq, ec); in ec_remove_handlers()
1550 clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); in ec_remove_handlers()
1552 if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) { in ec_remove_handlers()
1554 clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags); in ec_remove_handlers()
1570 pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr, in acpi_ec_setup()
1571 ec->data_addr); in acpi_ec_setup()
1573 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { in acpi_ec_setup()
1574 if (ec->gpe >= 0) in acpi_ec_setup()
1575 pr_info("GPE=0x%x\n", ec->gpe); in acpi_ec_setup()
1577 pr_info("IRQ=%d\n", ec->irq); in acpi_ec_setup()
1591 if (boot_ec && (boot_ec->handle == device->handle || in acpi_ec_add()
1600 return -ENOMEM; in acpi_ec_add()
1602 status = ec_parse_device(device->handle, 0, ec, NULL); in acpi_ec_add()
1604 ret = -EINVAL; in acpi_ec_add()
1608 if (boot_ec && ec->command_addr == boot_ec->command_addr && in acpi_ec_add()
1609 ec->data_addr == boot_ec->data_addr) { in acpi_ec_add()
1614 * boot_ec->gpe to ec->gpe. in acpi_ec_add()
1616 boot_ec->handle = ec->handle; in acpi_ec_add()
1617 acpi_handle_debug(ec->handle, "duplicated.\n"); in acpi_ec_add()
1628 acpi_handle_info(boot_ec->handle, in acpi_ec_add()
1632 acpi_handle_info(ec->handle, in acpi_ec_add()
1635 device->driver_data = ec; in acpi_ec_add()
1637 ret = !!request_region(ec->data_addr, 1, "EC data"); in acpi_ec_add()
1638 WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr); in acpi_ec_add()
1639 ret = !!request_region(ec->command_addr, 1, "EC cmd"); in acpi_ec_add()
1640 WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); in acpi_ec_add()
1643 acpi_walk_dep_device_list(ec->handle); in acpi_ec_add()
1645 acpi_handle_debug(ec->handle, "enumerated.\n"); in acpi_ec_add()
1660 return -EINVAL; in acpi_ec_remove()
1663 release_region(ec->data_addr, 1); in acpi_ec_remove()
1664 release_region(ec->command_addr, 1); in acpi_ec_remove()
1665 device->driver_data = NULL; in acpi_ec_remove()
1678 if (resource->type != ACPI_RESOURCE_TYPE_IO) in ec_parse_io_ports()
1686 if (ec->data_addr == 0) in ec_parse_io_ports()
1687 ec->data_addr = resource->data.io.minimum; in ec_parse_io_ports()
1688 else if (ec->command_addr == 0) in ec_parse_io_ports()
1689 ec->command_addr = resource->data.io.minimum; in ec_parse_io_ports()
1703 * This function is not Windows-compatible as Windows never enumerates the
1731 if (ACPI_FAILURE(status) || !ec->handle) { in acpi_ec_dsdt_probe()
1737 * When the DSDT EC is available, always re-configure boot EC to in acpi_ec_dsdt_probe()
1751 acpi_handle_info(ec->handle, in acpi_ec_dsdt_probe()
1756 * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization.
1775 if (!boot_ec || boot_ec->handle != ACPI_ROOT_OBJECT) in acpi_ec_ecdt_start()
1784 status = acpi_get_handle(NULL, ecdt_ptr->id, &handle); in acpi_ec_ecdt_start()
1786 boot_ec->handle = handle; in acpi_ec_ecdt_start()
1822 * MSI MS-171F
1846 ec_correct_ecdt, "MSI MS-171F", {
1847 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
1848 DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
1889 if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) { in acpi_ec_ecdt_probe()
1902 ec->command_addr = ecdt_ptr->data.address; in acpi_ec_ecdt_probe()
1903 ec->data_addr = ecdt_ptr->control.address; in acpi_ec_ecdt_probe()
1905 ec->command_addr = ecdt_ptr->control.address; in acpi_ec_ecdt_probe()
1906 ec->data_addr = ecdt_ptr->data.address; in acpi_ec_ecdt_probe()
1914 ec->gpe = ecdt_ptr->gpe; in acpi_ec_ecdt_probe()
1916 ec->handle = ACPI_ROOT_OBJECT; in acpi_ec_ecdt_probe()
1956 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && in acpi_ec_suspend_noirq()
1957 ec->gpe >= 0 && ec->reference_count >= 1) in acpi_ec_suspend_noirq()
1958 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); in acpi_ec_suspend_noirq()
1971 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && in acpi_ec_resume_noirq()
1972 ec->gpe >= 0 && ec->reference_count >= 1) in acpi_ec_resume_noirq()
1973 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); in acpi_ec_resume_noirq()
1990 acpi_mark_gpe_for_wake(NULL, first_ec->gpe); in acpi_ec_mark_gpe_for_wake()
1994 void acpi_ec_set_gpe_wake_mask(u8 action) in acpi_ec_set_gpe_wake_mask()
1997 acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); in acpi_ec_set_gpe_wake_mask()
2008 * Report wakeup if the status bit is set for any enabled GPE other in acpi_ec_dispatch_gpe()
2011 if (acpi_any_gpe_status_set(first_ec->gpe)) in acpi_ec_dispatch_gpe()
2015 * Dispatch the EC GPE in-band, but do not report wakeup in any case in acpi_ec_dispatch_gpe()
2018 ret = acpi_dispatch_gpe(NULL, first_ec->gpe); in acpi_ec_dispatch_gpe()
2022 /* Flush the event and query workqueues. */ in acpi_ec_dispatch_gpe()
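acpi_ec_dispatch_gpe() implements the suspend-to-idle policy spelled out in the comments above: a pending status bit on any GPE other than the EC's is reported as a wakeup, while the EC GPE itself is dispatched in-band and its work flushed without reporting a wakeup. A stand-alone model of that decision (all helpers are stand-ins for this sketch):

        #include <stdbool.h>
        #include <stdio.h>

        static bool any_non_ec_gpe_status_set(void) { return false; }  /* stand-in */
        static void dispatch_ec_gpe_in_band(void)   { /* run the EC event handler */ }
        static void flush_ec_workqueues(void)       { /* drain pending queries */ }

        int main(void)
        {
                if (any_non_ec_gpe_status_set()) {
                        printf("report wakeup: a non-EC GPE is pending\n");
                        return 0;
                }

                /* The EC GPE itself never causes a wakeup report here. */
                dispatch_ec_gpe_in_band();
                flush_ec_workqueues();
                printf("EC event handled in-band, no wakeup reported\n");
                return 0;
        }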
2039 if (!strncmp(val, "status", sizeof("status") - 1)) { in param_set_event_clearing()
2042 } else if (!strncmp(val, "query", sizeof("query") - 1)) { in param_set_event_clearing()
2045 } else if (!strncmp(val, "event", sizeof("event") - 1)) { in param_set_event_clearing()
2047 pr_info("Assuming SCI_EVT clearing on event reads\n"); in param_set_event_clearing()
2049 result = -EINVAL; in param_set_event_clearing()
2062 return sprintf(buffer, "event\n"); in param_get_event_clearing()
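param_set_event_clearing() accepts "status", "query" or "event" and stores the corresponding SCI_EVT clearing assumption; the sizeof("...") - 1 idiom gives the keyword length at compile time, so a trailing newline in the written value is tolerated. A stand-alone demo of the same parsing (the returned descriptions paraphrase the driver's pr_info messages):

        #include <stdio.h>
        #include <string.h>

        static const char *parse_event_clearing(const char *val)
        {
                if (!strncmp(val, "status", sizeof("status") - 1))
                        return "assume SCI_EVT clears on EC_SC status reads";
                if (!strncmp(val, "query", sizeof("query") - 1))
                        return "assume SCI_EVT clears on QUERY writes";
                if (!strncmp(val, "event", sizeof("event") - 1))
                        return "assume SCI_EVT clears on event data reads";
                return "invalid";
        }

        int main(void)
        {
                printf("%s\n", parse_event_clearing("query\n"));
                return 0;
        }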
2106 return -ENODEV; in acpi_ec_init_workqueues()
2145 * Disable EC wakeup on following systems to prevent periodic in acpi_ec_init()
2146 * wakeup from EC GPE. in acpi_ec_init()
2150 pr_debug("Disabling EC wakeup on suspend-to-idle\n"); in acpi_ec_init()