Lines matching full:ec (identifier search over ec.c)
3 * ec.c - ACPI Embedded Controller Driver (v3)
17 #define pr_fmt(fmt) "ACPI: EC: " fmt
38 /* EC status register */
43 #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
47 * This leads to lots of practical timing issues for the host EC driver.
48 * The following variations are defined (from the target EC firmware's
57 * kind of EC firmware has implemented an event queue and will
77 /* EC commands */
86 #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
88 #define ACPI_EC_UDELAY_POLL 550 /* Wait 550us between EC transaction polls */
90 * when trying to clear the EC */
107 /* ec.c is compiled in the acpi namespace, so this shows up as the acpi.ec_delay param */
110 MODULE_PARM_DESC(ec_delay, "Timeout (ms) to wait for an EC command to complete");
118 MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
122 MODULE_PARM_DESC(ec_polling_guard, "Guard time (us) between EC accesses in polling modes");
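These three knobs are module parameters; since ec.c is built in the acpi namespace (per the comment above), they would presumably be set on the kernel command line as acpi.ec_delay=, acpi.ec_busy_polling= and acpi.ec_polling_guard=. A minimal sketch of how such a parameter is typically wired up; the type, permissions and default shown here are assumptions for illustration, not the exact ec.c declarations:

	static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;	/* default assumed */
	module_param(ec_delay, uint, 0644);
	MODULE_PARM_DESC(ec_delay, "Timeout (ms) to wait for an EC command to complete");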
168 struct acpi_ec *ec; member
171 static int acpi_ec_submit_query(struct acpi_ec *ec);
172 static void advance_transaction(struct acpi_ec *ec, bool interrupt);
192 * Splitters used by the developers to track the boundary of the EC
228 #define ec_dbg_ref(ec, fmt, ...) \ argument
229 ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
235 static bool acpi_ec_started(struct acpi_ec *ec) in acpi_ec_started() argument
237 return test_bit(EC_FLAGS_STARTED, &ec->flags) && in acpi_ec_started()
238 !test_bit(EC_FLAGS_STOPPED, &ec->flags); in acpi_ec_started()
241 static bool acpi_ec_event_enabled(struct acpi_ec *ec) in acpi_ec_event_enabled() argument
246 * the EC transactions are allowed to be performed. in acpi_ec_event_enabled()
248 if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) in acpi_ec_event_enabled()
254 * 1. true: The EC event handling is disabled before entering in acpi_ec_event_enabled()
256 * 2. false: The EC event handling is automatically disabled as in acpi_ec_event_enabled()
257 * soon as the EC driver is stopped. in acpi_ec_event_enabled()
260 return acpi_ec_started(ec); in acpi_ec_event_enabled()
262 return test_bit(EC_FLAGS_STARTED, &ec->flags); in acpi_ec_event_enabled()
265 static bool acpi_ec_flushed(struct acpi_ec *ec) in acpi_ec_flushed() argument
267 return ec->reference_count == 1; in acpi_ec_flushed()
271 * EC Registers
274 static inline u8 acpi_ec_read_status(struct acpi_ec *ec) in acpi_ec_read_status() argument
276 u8 x = inb(ec->command_addr); in acpi_ec_read_status()
289 static inline u8 acpi_ec_read_data(struct acpi_ec *ec) in acpi_ec_read_data() argument
291 u8 x = inb(ec->data_addr); in acpi_ec_read_data()
293 ec->timestamp = jiffies; in acpi_ec_read_data()
298 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) in acpi_ec_write_cmd() argument
301 outb(command, ec->command_addr); in acpi_ec_write_cmd()
302 ec->timestamp = jiffies; in acpi_ec_write_cmd()
305 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) in acpi_ec_write_data() argument
308 outb(data, ec->data_addr); in acpi_ec_write_data()
309 ec->timestamp = jiffies; in acpi_ec_write_data()
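The accessors above are thin port-I/O wrappers: the command/status register lives at ec->command_addr, the data register at ec->data_addr, and each data and command access refreshes ec->timestamp so ec_guard() can enforce a quiet period before the next poll. A minimal sketch of the pattern (simplified; the status-register helper and any debug tracing in the real code are omitted):

	static inline u8 ec_sketch_read_data(struct acpi_ec *ec)
	{
		u8 x = inb(ec->data_addr);

		ec->timestamp = jiffies;	/* feeds the polling guard */
		return x;
	}

	static inline void ec_sketch_write_cmd(struct acpi_ec *ec, u8 command)
	{
		outb(command, ec->command_addr);
		ec->timestamp = jiffies;
	}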
337 static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec) in acpi_ec_gpe_status_set() argument
341 (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status); in acpi_ec_gpe_status_set()
345 static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open) in acpi_ec_enable_gpe() argument
348 acpi_enable_gpe(NULL, ec->gpe); in acpi_ec_enable_gpe()
350 BUG_ON(ec->reference_count < 1); in acpi_ec_enable_gpe()
351 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); in acpi_ec_enable_gpe()
353 if (acpi_ec_gpe_status_set(ec)) { in acpi_ec_enable_gpe()
360 advance_transaction(ec, false); in acpi_ec_enable_gpe()
364 static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close) in acpi_ec_disable_gpe() argument
367 acpi_disable_gpe(NULL, ec->gpe); in acpi_ec_disable_gpe()
369 BUG_ON(ec->reference_count < 1); in acpi_ec_disable_gpe()
370 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); in acpi_ec_disable_gpe()
378 static void acpi_ec_submit_request(struct acpi_ec *ec) in acpi_ec_submit_request() argument
380 ec->reference_count++; in acpi_ec_submit_request()
381 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) && in acpi_ec_submit_request()
382 ec->gpe >= 0 && ec->reference_count == 1) in acpi_ec_submit_request()
383 acpi_ec_enable_gpe(ec, true); in acpi_ec_submit_request()
386 static void acpi_ec_complete_request(struct acpi_ec *ec) in acpi_ec_complete_request() argument
390 ec->reference_count--; in acpi_ec_complete_request()
391 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) && in acpi_ec_complete_request()
392 ec->gpe >= 0 && ec->reference_count == 0) in acpi_ec_complete_request()
393 acpi_ec_disable_gpe(ec, true); in acpi_ec_complete_request()
394 flushed = acpi_ec_flushed(ec); in acpi_ec_complete_request()
396 wake_up(&ec->wait); in acpi_ec_complete_request()
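The submit/complete pair above implements a small reference count: each outstanding request bumps ec->reference_count, the GPE is armed when the first reference appears and disarmed when the last one goes away, and the waitqueue is kicked once the count drains back to the driver's own baseline reference so acpi_ec_stop() can wait for a flush. A simplified sketch of the idea, with the EVENT_HANDLER_INSTALLED / gpe >= 0 gating from the fragments folded into comments:

	static void ec_sketch_get(struct acpi_ec *ec)
	{
		/* arm the GPE on the first reference (only if a GPE handler is installed) */
		if (++ec->reference_count == 1)
			acpi_ec_enable_gpe(ec, true);
	}

	static void ec_sketch_put(struct acpi_ec *ec)
	{
		/* disarm on the last reference, then let waiters know */
		if (--ec->reference_count == 0)
			acpi_ec_disable_gpe(ec, true);
		if (acpi_ec_flushed(ec))	/* count == 1: only the driver's baseline ref left */
			wake_up(&ec->wait);
	}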
399 static void acpi_ec_mask_events(struct acpi_ec *ec) in acpi_ec_mask_events() argument
401 if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) { in acpi_ec_mask_events()
402 if (ec->gpe >= 0) in acpi_ec_mask_events()
403 acpi_ec_disable_gpe(ec, false); in acpi_ec_mask_events()
405 disable_irq_nosync(ec->irq); in acpi_ec_mask_events()
408 set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags); in acpi_ec_mask_events()
412 static void acpi_ec_unmask_events(struct acpi_ec *ec) in acpi_ec_unmask_events() argument
414 if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) { in acpi_ec_unmask_events()
415 clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags); in acpi_ec_unmask_events()
416 if (ec->gpe >= 0) in acpi_ec_unmask_events()
417 acpi_ec_enable_gpe(ec, false); in acpi_ec_unmask_events()
419 enable_irq(ec->irq); in acpi_ec_unmask_events()
429 * @ec: the EC device
436 static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec) in acpi_ec_submit_flushable_request() argument
438 if (!acpi_ec_started(ec)) in acpi_ec_submit_flushable_request()
440 acpi_ec_submit_request(ec); in acpi_ec_submit_flushable_request()
444 static void acpi_ec_submit_event(struct acpi_ec *ec) in acpi_ec_submit_event() argument
450 acpi_ec_mask_events(ec); in acpi_ec_submit_event()
451 if (!acpi_ec_event_enabled(ec)) in acpi_ec_submit_event()
454 if (ec->event_state != EC_EVENT_READY) in acpi_ec_submit_event()
460 ec->event_state = EC_EVENT_IN_PROGRESS; in acpi_ec_submit_event()
468 if (ec->events_to_process++ > 0) in acpi_ec_submit_event()
471 ec->events_in_progress++; in acpi_ec_submit_event()
472 queue_work(ec_wq, &ec->work); in acpi_ec_submit_event()
475 static void acpi_ec_complete_event(struct acpi_ec *ec) in acpi_ec_complete_event() argument
477 if (ec->event_state == EC_EVENT_IN_PROGRESS) in acpi_ec_complete_event()
478 ec->event_state = EC_EVENT_COMPLETE; in acpi_ec_complete_event()
481 static void acpi_ec_close_event(struct acpi_ec *ec) in acpi_ec_close_event() argument
483 if (ec->event_state != EC_EVENT_READY) in acpi_ec_close_event()
487 ec->event_state = EC_EVENT_READY; in acpi_ec_close_event()
488 acpi_ec_unmask_events(ec); in acpi_ec_close_event()
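Taken together, submit/complete/close form a three-state event machine: acpi_ec_submit_event() moves READY to IN_PROGRESS (masking further events and queuing work), acpi_ec_complete_event() marks IN_PROGRESS as COMPLETE once the QUERY transaction has been accepted, and acpi_ec_close_event() returns to READY and unmasks events. An illustration only; the state names come from the fragments, the transition triggers are inferred:

	enum {					/* sketch, values not from ec.c */
		SKETCH_EC_EVENT_READY,		/* no event being handled; events unmasked */
		SKETCH_EC_EVENT_IN_PROGRESS,	/* SCI_EVT seen, work queued, events masked */
		SKETCH_EC_EVENT_COMPLETE,	/* QUERY accepted; waiting for close_event() */
	};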
491 static inline void __acpi_ec_enable_event(struct acpi_ec *ec) in __acpi_ec_enable_event() argument
493 if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) in __acpi_ec_enable_event()
499 advance_transaction(ec, false); in __acpi_ec_enable_event()
502 static inline void __acpi_ec_disable_event(struct acpi_ec *ec) in __acpi_ec_disable_event() argument
504 if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) in __acpi_ec_disable_event()
509 * Process _Q events that might have accumulated in the EC.
510 * Run with locked ec mutex.
512 static void acpi_ec_clear(struct acpi_ec *ec) in acpi_ec_clear() argument
517 if (acpi_ec_submit_query(ec)) in acpi_ec_clear()
521 pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); in acpi_ec_clear()
523 pr_info("%d stale EC events cleared\n", i); in acpi_ec_clear()
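acpi_ec_clear() drains any _Q events the EC accumulated (for example across a suspend) by issuing query transactions in a loop until the EC reports nothing pending or a cap is reached. A sketch of that loop; the cap name and value are assumptions, the real driver uses a similar constant:

	static void ec_sketch_clear(struct acpi_ec *ec)
	{
		int i;

		for (i = 0; i < 100 /* assumed cap */; i++)
			if (acpi_ec_submit_query(ec))	/* non-zero: nothing left to drain */
				break;

		if (i == 100)
			pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
		else if (i)
			pr_info("%d stale EC events cleared\n", i);
	}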
526 static void acpi_ec_enable_event(struct acpi_ec *ec) in acpi_ec_enable_event() argument
530 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_enable_event()
531 if (acpi_ec_started(ec)) in acpi_ec_enable_event()
532 __acpi_ec_enable_event(ec); in acpi_ec_enable_event()
533 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_enable_event()
537 acpi_ec_clear(ec); in acpi_ec_enable_event()
543 flush_workqueue(ec_wq); /* flush ec->work */ in __acpi_ec_flush_work()
547 static void acpi_ec_disable_event(struct acpi_ec *ec) in acpi_ec_disable_event() argument
551 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_disable_event()
552 __acpi_ec_disable_event(ec); in acpi_ec_disable_event()
553 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_disable_event()
572 static bool acpi_ec_guard_event(struct acpi_ec *ec) in acpi_ec_guard_event() argument
577 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_guard_event()
591 ec->event_state != EC_EVENT_READY && in acpi_ec_guard_event()
592 (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY); in acpi_ec_guard_event()
593 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_guard_event()
597 static int ec_transaction_polled(struct acpi_ec *ec) in ec_transaction_polled() argument
602 spin_lock_irqsave(&ec->lock, flags); in ec_transaction_polled()
603 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL)) in ec_transaction_polled()
605 spin_unlock_irqrestore(&ec->lock, flags); in ec_transaction_polled()
609 static int ec_transaction_completed(struct acpi_ec *ec) in ec_transaction_completed() argument
614 spin_lock_irqsave(&ec->lock, flags); in ec_transaction_completed()
615 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) in ec_transaction_completed()
617 spin_unlock_irqrestore(&ec->lock, flags); in ec_transaction_completed()
621 static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag) in ec_transaction_transition() argument
623 ec->curr->flags |= flag; in ec_transaction_transition()
625 if (ec->curr->command != ACPI_EC_COMMAND_QUERY) in ec_transaction_transition()
631 acpi_ec_close_event(ec); in ec_transaction_transition()
637 acpi_ec_close_event(ec); in ec_transaction_transition()
643 acpi_ec_complete_event(ec); in ec_transaction_transition()
647 static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t) in acpi_ec_spurious_interrupt() argument
654 acpi_ec_mask_events(ec); in acpi_ec_spurious_interrupt()
657 static void advance_transaction(struct acpi_ec *ec, bool interrupt) in advance_transaction() argument
659 struct transaction *t = ec->curr; in advance_transaction()
665 status = acpi_ec_read_status(ec); in advance_transaction()
673 ec->event_state == EC_EVENT_COMPLETE) in advance_transaction()
674 acpi_ec_close_event(ec); in advance_transaction()
683 acpi_ec_write_data(ec, t->wdata[t->wi++]); in advance_transaction()
685 acpi_ec_spurious_interrupt(ec, t); in advance_transaction()
688 t->rdata[t->ri++] = acpi_ec_read_data(ec); in advance_transaction()
690 ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); in advance_transaction()
697 acpi_ec_spurious_interrupt(ec, t); in advance_transaction()
700 ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); in advance_transaction()
704 acpi_ec_write_cmd(ec, t->command); in advance_transaction()
705 ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); in advance_transaction()
710 acpi_ec_submit_event(ec); in advance_transaction()
713 wake_up(&ec->wait); in advance_transaction()
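Each GPE or IRQ advances the current transaction one step, driven by the status register: a pending write byte goes out once the EC has consumed the previous one, an expected byte is read when the EC signals output ready, and once both counts are satisfied the transaction is marked COMPLETE and any waiter on ec->wait is woken. A simplified sketch of one such step inside advance_transaction(), with t = ec->curr; the IBF/OBF flag names follow common ec.c conventions and the spurious-interrupt and event paths are omitted:

	status = acpi_ec_read_status(ec);
	if (t->wlen > t->wi) {
		if (!(status & ACPI_EC_FLAG_IBF))	/* input buffer empty: EC took the last byte */
			acpi_ec_write_data(ec, t->wdata[t->wi++]);
	} else if (t->rlen > t->ri) {
		if (status & ACPI_EC_FLAG_OBF) {	/* output buffer full: a byte is ready */
			t->rdata[t->ri++] = acpi_ec_read_data(ec);
			if (t->rlen == t->ri)
				ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
		}
	}
	wake_up(&ec->wait);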
716 static void start_transaction(struct acpi_ec *ec) in start_transaction() argument
718 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; in start_transaction()
719 ec->curr->flags = 0; in start_transaction()
722 static int ec_guard(struct acpi_ec *ec) in ec_guard() argument
724 unsigned long guard = usecs_to_jiffies(ec->polling_guard); in ec_guard()
725 unsigned long timeout = ec->timestamp + guard; in ec_guard()
727 /* Ensure guarding period before polling EC status */ in ec_guard()
729 if (ec->busy_polling) { in ec_guard()
731 if (ec_transaction_completed(ec)) in ec_guard()
745 if (!ec_transaction_polled(ec) && in ec_guard()
746 !acpi_ec_guard_event(ec)) in ec_guard()
748 if (wait_event_timeout(ec->wait, in ec_guard()
749 ec_transaction_completed(ec), in ec_guard()
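ec_guard() enforces a quiet period of polling_guard microseconds since the last register access before the status is polled again: in busy-polling mode it spins with udelay() between checks, otherwise it sleeps on ec->wait for up to one guard interval. A consolidated sketch of that logic, simplified; the real function has extra branches around QUERY transactions and event guarding:

	static int ec_sketch_guard(struct acpi_ec *ec)
	{
		unsigned long guard = usecs_to_jiffies(ec->polling_guard);
		unsigned long timeout = ec->timestamp + guard;

		do {
			if (ec->busy_polling) {
				if (ec_transaction_completed(ec))
					return 0;
				udelay(jiffies_to_usecs(guard));	/* spin between polls */
			} else if (wait_event_timeout(ec->wait,
						      ec_transaction_completed(ec),
						      guard)) {
				return 0;
			}
		} while (time_before(jiffies, timeout));
		return -ETIME;
	}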
757 static int ec_poll(struct acpi_ec *ec) in ec_poll() argument
766 if (!ec_guard(ec)) in ec_poll()
768 spin_lock_irqsave(&ec->lock, flags); in ec_poll()
769 advance_transaction(ec, false); in ec_poll()
770 spin_unlock_irqrestore(&ec->lock, flags); in ec_poll()
773 spin_lock_irqsave(&ec->lock, flags); in ec_poll()
774 start_transaction(ec); in ec_poll()
775 spin_unlock_irqrestore(&ec->lock, flags); in ec_poll()
780 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, in acpi_ec_transaction_unlocked() argument
790 spin_lock_irqsave(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
792 if (!acpi_ec_submit_flushable_request(ec)) { in acpi_ec_transaction_unlocked()
796 ec_dbg_ref(ec, "Increase command"); in acpi_ec_transaction_unlocked()
798 ec->curr = t; in acpi_ec_transaction_unlocked()
800 start_transaction(ec); in acpi_ec_transaction_unlocked()
801 spin_unlock_irqrestore(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
803 ret = ec_poll(ec); in acpi_ec_transaction_unlocked()
805 spin_lock_irqsave(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
807 acpi_ec_unmask_events(ec); in acpi_ec_transaction_unlocked()
809 ec->curr = NULL; in acpi_ec_transaction_unlocked()
811 acpi_ec_complete_request(ec); in acpi_ec_transaction_unlocked()
812 ec_dbg_ref(ec, "Decrease command"); in acpi_ec_transaction_unlocked()
814 spin_unlock_irqrestore(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
818 static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) in acpi_ec_transaction() argument
823 if (!ec || !t || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) in acpi_ec_transaction()
826 mutex_lock(&ec->mutex); in acpi_ec_transaction()
827 if (ec->global_lock) { in acpi_ec_transaction()
835 status = acpi_ec_transaction_unlocked(ec, t); in acpi_ec_transaction()
837 if (ec->global_lock) in acpi_ec_transaction()
840 mutex_unlock(&ec->mutex); in acpi_ec_transaction()
844 static int acpi_ec_burst_enable(struct acpi_ec *ec) in acpi_ec_burst_enable() argument
851 return acpi_ec_transaction_unlocked(ec, &t); in acpi_ec_burst_enable()
854 static int acpi_ec_burst_disable(struct acpi_ec *ec) in acpi_ec_burst_disable() argument
860 return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ? in acpi_ec_burst_disable()
861 acpi_ec_transaction_unlocked(ec, &t) : 0; in acpi_ec_burst_disable()
864 static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) in acpi_ec_read() argument
872 result = acpi_ec_transaction(ec, &t); in acpi_ec_read()
877 static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data) in acpi_ec_read_unlocked() argument
885 result = acpi_ec_transaction_unlocked(ec, &t); in acpi_ec_read_unlocked()
890 static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) in acpi_ec_write() argument
897 return acpi_ec_transaction(ec, &t); in acpi_ec_write()
900 static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data) in acpi_ec_write_unlocked() argument
907 return acpi_ec_transaction_unlocked(ec, &t); in acpi_ec_write_unlocked()
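The read/write helpers above are thin wrappers that package a struct transaction and hand it to the transaction engine. A sketch of how the read side presumably looks; the field names follow the fragments elsewhere in this listing, since the full struct definition is not shown here:

	static int ec_sketch_read(struct acpi_ec *ec, u8 address, u8 *data)
	{
		u8 d;
		struct transaction t = { .command = ACPI_EC_COMMAND_READ,
					 .wdata = &address, .rdata = &d,
					 .wlen = 1, .rlen = 1 };
		int result = acpi_ec_transaction(ec, &t);

		*data = d;
		return result;
	}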
952 /* Get the handle to the EC device */
961 static void acpi_ec_start(struct acpi_ec *ec, bool resuming) in acpi_ec_start() argument
965 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_start()
966 if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) { in acpi_ec_start()
967 ec_dbg_drv("Starting EC"); in acpi_ec_start()
970 acpi_ec_submit_request(ec); in acpi_ec_start()
971 ec_dbg_ref(ec, "Increase driver"); in acpi_ec_start()
973 ec_log_drv("EC started"); in acpi_ec_start()
975 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_start()
978 static bool acpi_ec_stopped(struct acpi_ec *ec) in acpi_ec_stopped() argument
983 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_stopped()
984 flushed = acpi_ec_flushed(ec); in acpi_ec_stopped()
985 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_stopped()
989 static void acpi_ec_stop(struct acpi_ec *ec, bool suspending) in acpi_ec_stop() argument
993 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_stop()
994 if (acpi_ec_started(ec)) { in acpi_ec_stop()
995 ec_dbg_drv("Stopping EC"); in acpi_ec_stop()
996 set_bit(EC_FLAGS_STOPPED, &ec->flags); in acpi_ec_stop()
997 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_stop()
998 wait_event(ec->wait, acpi_ec_stopped(ec)); in acpi_ec_stop()
999 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_stop()
1002 acpi_ec_complete_request(ec); in acpi_ec_stop()
1003 ec_dbg_ref(ec, "Decrease driver"); in acpi_ec_stop()
1005 __acpi_ec_disable_event(ec); in acpi_ec_stop()
1006 clear_bit(EC_FLAGS_STARTED, &ec->flags); in acpi_ec_stop()
1007 clear_bit(EC_FLAGS_STOPPED, &ec->flags); in acpi_ec_stop()
1008 ec_log_drv("EC stopped"); in acpi_ec_stop()
1010 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_stop()
1013 static void acpi_ec_enter_noirq(struct acpi_ec *ec) in acpi_ec_enter_noirq() argument
1017 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_enter_noirq()
1018 ec->busy_polling = true; in acpi_ec_enter_noirq()
1019 ec->polling_guard = 0; in acpi_ec_enter_noirq()
1021 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_enter_noirq()
1024 static void acpi_ec_leave_noirq(struct acpi_ec *ec) in acpi_ec_leave_noirq() argument
1028 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_leave_noirq()
1029 ec->busy_polling = ec_busy_polling; in acpi_ec_leave_noirq()
1030 ec->polling_guard = ec_polling_guard; in acpi_ec_leave_noirq()
1032 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_leave_noirq()
1037 struct acpi_ec *ec = first_ec; in acpi_ec_block_transactions() local
1039 if (!ec) in acpi_ec_block_transactions()
1042 mutex_lock(&ec->mutex); in acpi_ec_block_transactions()
1044 acpi_ec_stop(ec, true); in acpi_ec_block_transactions()
1045 mutex_unlock(&ec->mutex); in acpi_ec_block_transactions()
1062 acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) in acpi_ec_get_query_handler_by_value() argument
1066 mutex_lock(&ec->mutex); in acpi_ec_get_query_handler_by_value()
1067 list_for_each_entry(handler, &ec->list, node) { in acpi_ec_get_query_handler_by_value()
1070 mutex_unlock(&ec->mutex); in acpi_ec_get_query_handler_by_value()
1074 mutex_unlock(&ec->mutex); in acpi_ec_get_query_handler_by_value()
1091 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, in acpi_ec_add_query_handler() argument
1108 mutex_lock(&ec->mutex); in acpi_ec_add_query_handler()
1110 list_add(&handler->node, &ec->list); in acpi_ec_add_query_handler()
1111 mutex_unlock(&ec->mutex); in acpi_ec_add_query_handler()
1117 static void acpi_ec_remove_query_handlers(struct acpi_ec *ec, in acpi_ec_remove_query_handlers() argument
1123 mutex_lock(&ec->mutex); in acpi_ec_remove_query_handlers()
1124 list_for_each_entry_safe(handler, tmp, &ec->list, node) { in acpi_ec_remove_query_handlers()
1129 * EC queries. in acpi_ec_remove_query_handlers()
1137 mutex_unlock(&ec->mutex); in acpi_ec_remove_query_handlers()
1142 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) in acpi_ec_remove_query_handler() argument
1144 acpi_ec_remove_query_handlers(ec, false, query_bit); in acpi_ec_remove_query_handler()
1153 struct acpi_ec *ec = q->ec; in acpi_ec_event_processor() local
1164 spin_lock_irq(&ec->lock); in acpi_ec_event_processor()
1165 ec->queries_in_progress--; in acpi_ec_event_processor()
1166 spin_unlock_irq(&ec->lock); in acpi_ec_event_processor()
1172 static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval) in acpi_ec_create_query() argument
1186 q->ec = ec; in acpi_ec_create_query()
1190 static int acpi_ec_submit_query(struct acpi_ec *ec) in acpi_ec_submit_query() argument
1196 q = acpi_ec_create_query(ec, &value); in acpi_ec_submit_query()
1201 * Query the EC to find out which _Qxx method we need to evaluate. in acpi_ec_submit_query()
1205 result = acpi_ec_transaction(ec, &q->transaction); in acpi_ec_submit_query()
1214 q->handler = acpi_ec_get_query_handler_by_value(ec, value); in acpi_ec_submit_query()
1229 spin_lock_irq(&ec->lock); in acpi_ec_submit_query()
1231 ec->queries_in_progress++; in acpi_ec_submit_query()
1234 spin_unlock_irq(&ec->lock); in acpi_ec_submit_query()
1246 struct acpi_ec *ec = container_of(work, struct acpi_ec, work); in acpi_ec_event_handler() local
1250 spin_lock_irq(&ec->lock); in acpi_ec_event_handler()
1252 while (ec->events_to_process) { in acpi_ec_event_handler()
1253 spin_unlock_irq(&ec->lock); in acpi_ec_event_handler()
1255 acpi_ec_submit_query(ec); in acpi_ec_event_handler()
1257 spin_lock_irq(&ec->lock); in acpi_ec_event_handler()
1259 ec->events_to_process--; in acpi_ec_event_handler()
1270 acpi_ec_complete_event(ec); in acpi_ec_event_handler()
1274 spin_unlock_irq(&ec->lock); in acpi_ec_event_handler()
1276 guard_timeout = !!ec_guard(ec); in acpi_ec_event_handler()
1278 spin_lock_irq(&ec->lock); in acpi_ec_event_handler()
1281 if (guard_timeout && !ec->curr) in acpi_ec_event_handler()
1282 advance_transaction(ec, false); in acpi_ec_event_handler()
1284 acpi_ec_close_event(ec); in acpi_ec_event_handler()
1289 ec->events_in_progress--; in acpi_ec_event_handler()
1291 spin_unlock_irq(&ec->lock); in acpi_ec_event_handler()
1294 static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt) in clear_gpe_and_advance_transaction() argument
1308 if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec)) in clear_gpe_and_advance_transaction()
1309 acpi_clear_gpe(NULL, ec->gpe); in clear_gpe_and_advance_transaction()
1311 advance_transaction(ec, true); in clear_gpe_and_advance_transaction()
1314 static void acpi_ec_handle_interrupt(struct acpi_ec *ec) in acpi_ec_handle_interrupt() argument
1318 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_handle_interrupt()
1320 clear_gpe_and_advance_transaction(ec, true); in acpi_ec_handle_interrupt()
1322 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_handle_interrupt()
1347 struct acpi_ec *ec = handler_context; in acpi_ec_space_handler() local
1358 mutex_lock(&ec->mutex); in acpi_ec_space_handler()
1360 if (ec->global_lock) { in acpi_ec_space_handler()
1370 if (ec->busy_polling || bits > 8) in acpi_ec_space_handler()
1371 acpi_ec_burst_enable(ec); in acpi_ec_space_handler()
1375 acpi_ec_read_unlocked(ec, address, value) : in acpi_ec_space_handler()
1376 acpi_ec_write_unlocked(ec, address, *value); in acpi_ec_space_handler()
1381 if (ec->busy_polling || bits > 8) in acpi_ec_space_handler()
1382 acpi_ec_burst_disable(ec); in acpi_ec_space_handler()
1384 if (ec->global_lock) in acpi_ec_space_handler()
1388 mutex_unlock(&ec->mutex); in acpi_ec_space_handler()
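The address-space handler above maps AML EC opregion accesses onto the byte-wide helpers, turning a multi-byte access into a loop of single-byte reads or writes and wrapping it in burst mode when busy-polling or when more than 8 bits are accessed. A sketch of the inner loop, with the surrounding locking and global-lock handling omitted; the loop variables are assumptions based on the fragments, where value walks the bytes of the AML operand:

	/* bytes = bits / 8; 'function' is ACPI_READ or ACPI_WRITE */
	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
			acpi_ec_read_unlocked(ec, address, value) :
			acpi_ec_write_unlocked(ec, address, *value);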
1411 static void acpi_ec_free(struct acpi_ec *ec) in acpi_ec_free() argument
1413 if (first_ec == ec) in acpi_ec_free()
1415 if (boot_ec == ec) in acpi_ec_free()
1417 kfree(ec); in acpi_ec_free()
1422 struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL); in acpi_ec_alloc() local
1424 if (!ec) in acpi_ec_alloc()
1426 mutex_init(&ec->mutex); in acpi_ec_alloc()
1427 init_waitqueue_head(&ec->wait); in acpi_ec_alloc()
1428 INIT_LIST_HEAD(&ec->list); in acpi_ec_alloc()
1429 spin_lock_init(&ec->lock); in acpi_ec_alloc()
1430 INIT_WORK(&ec->work, acpi_ec_event_handler); in acpi_ec_alloc()
1431 ec->timestamp = jiffies; in acpi_ec_alloc()
1432 ec->busy_polling = true; in acpi_ec_alloc()
1433 ec->polling_guard = 0; in acpi_ec_alloc()
1434 ec->gpe = -1; in acpi_ec_alloc()
1435 ec->irq = -1; in acpi_ec_alloc()
1436 return ec; in acpi_ec_alloc()
1445 struct acpi_ec *ec = context; in acpi_ec_register_query_methods() local
1452 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL); in acpi_ec_register_query_methods()
1461 struct acpi_ec *ec = context; in ec_parse_device() local
1464 ec->command_addr = ec->data_addr = 0; in ec_parse_device()
1467 ec_parse_io_ports, ec); in ec_parse_device()
1470 if (ec->data_addr == 0 || ec->command_addr == 0) in ec_parse_device()
1473 /* Get GPE bit assignment (EC events). */ in ec_parse_device()
1477 ec->gpe = tmp; in ec_parse_device()
1483 /* Use the global lock for all EC transactions? */ in ec_parse_device()
1486 ec->global_lock = tmp; in ec_parse_device()
1487 ec->handle = handle; in ec_parse_device()
1491 static bool install_gpe_event_handler(struct acpi_ec *ec) in install_gpe_event_handler() argument
1495 status = acpi_install_gpe_raw_handler(NULL, ec->gpe, in install_gpe_event_handler()
1497 &acpi_ec_gpe_handler, ec); in install_gpe_event_handler()
1501 if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) in install_gpe_event_handler()
1502 acpi_ec_enable_gpe(ec, true); in install_gpe_event_handler()
1507 static bool install_gpio_irq_event_handler(struct acpi_ec *ec) in install_gpio_irq_event_handler() argument
1509 return request_threaded_irq(ec->irq, NULL, acpi_ec_irq_handler, in install_gpio_irq_event_handler()
1510 IRQF_SHARED | IRQF_ONESHOT, "ACPI EC", ec) >= 0; in install_gpio_irq_event_handler()
1515 * @ec: Target EC.
1516 * @device: ACPI device object corresponding to @ec.
1519 * Install a handler for the EC address space type unless it has been installed
1520 * already. If @device is not NULL, also look for EC query methods in the
1522 * handler for the EC, if possible.
1530 static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, in ec_install_handlers() argument
1535 acpi_ec_start(ec, false); in ec_install_handlers()
1537 if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { in ec_install_handlers()
1538 acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle; in ec_install_handlers()
1540 acpi_ec_enter_noirq(ec); in ec_install_handlers()
1544 NULL, ec); in ec_install_handlers()
1546 acpi_ec_stop(ec, false); in ec_install_handlers()
1549 set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); in ec_install_handlers()
1552 if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) { in ec_install_handlers()
1553 acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC); in ec_install_handlers()
1554 set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags); in ec_install_handlers()
1560 if (ec->gpe < 0) { in ec_install_handlers()
1570 ec->irq = irq; in ec_install_handlers()
1573 if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) { in ec_install_handlers()
1575 acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1, in ec_install_handlers()
1577 NULL, ec, NULL); in ec_install_handlers()
1578 set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags); in ec_install_handlers()
1580 if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { in ec_install_handlers()
1583 if (ec->gpe >= 0) in ec_install_handlers()
1584 ready = install_gpe_event_handler(ec); in ec_install_handlers()
1585 else if (ec->irq >= 0) in ec_install_handlers()
1586 ready = install_gpio_irq_event_handler(ec); in ec_install_handlers()
1589 set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); in ec_install_handlers()
1590 acpi_ec_leave_noirq(ec); in ec_install_handlers()
1594 * the EC can be polled for events. in ec_install_handlers()
1597 /* EC is fully operational, allow queries */ in ec_install_handlers()
1598 acpi_ec_enable_event(ec); in ec_install_handlers()
1603 static void ec_remove_handlers(struct acpi_ec *ec) in ec_remove_handlers() argument
1605 acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle; in ec_remove_handlers()
1607 if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { in ec_remove_handlers()
1613 clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); in ec_remove_handlers()
1617 * Stops handling the EC transactions after removing the operation in ec_remove_handlers()
1619 * invoked during the removal can result in new EC transactions. in ec_remove_handlers()
1621 * Flushes the EC requests and thus disables the GPE before in ec_remove_handlers()
1627 acpi_ec_stop(ec, false); in ec_remove_handlers()
1629 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { in ec_remove_handlers()
1630 if (ec->gpe >= 0 && in ec_remove_handlers()
1631 ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, in ec_remove_handlers()
1635 if (ec->irq >= 0) in ec_remove_handlers()
1636 free_irq(ec->irq, ec); in ec_remove_handlers()
1638 clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); in ec_remove_handlers()
1640 if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) { in ec_remove_handlers()
1641 acpi_ec_remove_query_handlers(ec, true, 0); in ec_remove_handlers()
1642 clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags); in ec_remove_handlers()
1646 static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool call_reg) in acpi_ec_setup() argument
1650 /* First EC capable of handling transactions */ in acpi_ec_setup()
1652 first_ec = ec; in acpi_ec_setup()
1654 ret = ec_install_handlers(ec, device, call_reg); in acpi_ec_setup()
1656 if (ec == first_ec) in acpi_ec_setup()
1662 pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr, in acpi_ec_setup()
1663 ec->data_addr); in acpi_ec_setup()
1665 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { in acpi_ec_setup()
1666 if (ec->gpe >= 0) in acpi_ec_setup()
1667 pr_info("GPE=0x%x\n", ec->gpe); in acpi_ec_setup()
1669 pr_info("IRQ=%d\n", ec->irq); in acpi_ec_setup()
1677 struct acpi_ec *ec; in acpi_ec_add() local
1685 /* Fast path: this device corresponds to the boot EC. */ in acpi_ec_add()
1686 ec = boot_ec; in acpi_ec_add()
1690 ec = acpi_ec_alloc(); in acpi_ec_add()
1691 if (!ec) in acpi_ec_add()
1694 status = ec_parse_device(device->handle, 0, ec, NULL); in acpi_ec_add()
1700 if (boot_ec && ec->command_addr == boot_ec->command_addr && in acpi_ec_add()
1701 ec->data_addr == boot_ec->data_addr) { in acpi_ec_add()
1705 * quirks. So do not change boot_ec->gpe to ec->gpe, in acpi_ec_add()
1708 boot_ec->handle = ec->handle; in acpi_ec_add()
1711 boot_ec->gpe = ec->gpe; in acpi_ec_add()
1713 acpi_handle_debug(ec->handle, "duplicated.\n"); in acpi_ec_add()
1714 acpi_ec_free(ec); in acpi_ec_add()
1715 ec = boot_ec; in acpi_ec_add()
1719 ret = acpi_ec_setup(ec, device, true); in acpi_ec_add()
1723 if (ec == boot_ec) in acpi_ec_add()
1725 "Boot %s EC initialization complete\n", in acpi_ec_add()
1728 acpi_handle_info(ec->handle, in acpi_ec_add()
1729 "EC: Used to handle transactions and events\n"); in acpi_ec_add()
1731 device->driver_data = ec; in acpi_ec_add()
1733 ret = !!request_region(ec->data_addr, 1, "EC data"); in acpi_ec_add()
1734 WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr); in acpi_ec_add()
1735 ret = !!request_region(ec->command_addr, 1, "EC cmd"); in acpi_ec_add()
1736 WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); in acpi_ec_add()
1738 /* Reprobe devices depending on the EC */ in acpi_ec_add()
1741 acpi_handle_debug(ec->handle, "enumerated.\n"); in acpi_ec_add()
1745 if (ec != boot_ec) in acpi_ec_add()
1746 acpi_ec_free(ec); in acpi_ec_add()
1753 struct acpi_ec *ec; in acpi_ec_remove() local
1758 ec = acpi_driver_data(device); in acpi_ec_remove()
1759 release_region(ec->data_addr, 1); in acpi_ec_remove()
1760 release_region(ec->command_addr, 1); in acpi_ec_remove()
1762 if (ec != boot_ec) { in acpi_ec_remove()
1763 ec_remove_handlers(ec); in acpi_ec_remove()
1764 acpi_ec_free(ec); in acpi_ec_remove()
1777 struct acpi_ec *ec = context; in ec_parse_io_ports() local
1787 if (ec->data_addr == 0) in ec_parse_io_ports()
1788 ec->data_addr = resource->data.io.minimum; in ec_parse_io_ports()
1789 else if (ec->command_addr == 0) in ec_parse_io_ports()
1790 ec->command_addr = resource->data.io.minimum; in ec_parse_io_ports()
1805 * namespace EC before the main ACPI device enumeration process. It is
1810 struct acpi_ec *ec; in acpi_ec_dsdt_probe() local
1818 * picking up an invalid EC device. in acpi_ec_dsdt_probe()
1823 ec = acpi_ec_alloc(); in acpi_ec_dsdt_probe()
1824 if (!ec) in acpi_ec_dsdt_probe()
1831 status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL); in acpi_ec_dsdt_probe()
1832 if (ACPI_FAILURE(status) || !ec->handle) { in acpi_ec_dsdt_probe()
1833 acpi_ec_free(ec); in acpi_ec_dsdt_probe()
1838 * When the DSDT EC is available, always re-configure boot EC to in acpi_ec_dsdt_probe()
1844 ret = acpi_ec_setup(ec, NULL, true); in acpi_ec_dsdt_probe()
1846 acpi_ec_free(ec); in acpi_ec_dsdt_probe()
1850 boot_ec = ec; in acpi_ec_dsdt_probe()
1852 acpi_handle_info(ec->handle, in acpi_ec_dsdt_probe()
1853 "Boot DSDT EC used to handle transactions\n"); in acpi_ec_dsdt_probe()
1857 * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization.
1859 * First, look for an ACPI handle for the boot ECDT EC if acpi_ec_add() has not
1862 * Next, in case the DSDT EC is not functioning, it is still necessary to
1863 * provide a functional ECDT EC to handle events, so add an extra device object
1866 * This is useful on platforms with valid ECDT and invalid DSDT EC settings,
1875 /* Bail out if a matching EC has been found in the namespace. */ in acpi_ec_ecdt_start()
1889 /* Add a special ACPI device object to represent the boot EC. */ in acpi_ec_ecdt_start()
1897 * On some hardware it is necessary to clear events accumulated by the EC during
1903 * Ideally, the EC should also be instructed NOT to accumulate events during
1915 pr_debug("Detected system needing EC poll on resume.\n"); in ec_clear_on_resume()
1935 * with DSDT EC, don't duplicate the DSDT EC with ECDT EC in this case.
2015 struct acpi_ec *ec; in acpi_ec_ecdt_probe() local
2019 /* Generate a boot ec context. */ in acpi_ec_ecdt_probe()
2034 ec = acpi_ec_alloc(); in acpi_ec_ecdt_probe()
2035 if (!ec) in acpi_ec_ecdt_probe()
2039 ec->command_addr = ecdt_ptr->data.address; in acpi_ec_ecdt_probe()
2040 ec->data_addr = ecdt_ptr->control.address; in acpi_ec_ecdt_probe()
2042 ec->command_addr = ecdt_ptr->control.address; in acpi_ec_ecdt_probe()
2043 ec->data_addr = ecdt_ptr->data.address; in acpi_ec_ecdt_probe()
2051 ec->gpe = ecdt_ptr->gpe; in acpi_ec_ecdt_probe()
2053 ec->handle = ACPI_ROOT_OBJECT; in acpi_ec_ecdt_probe()
2059 ret = acpi_ec_setup(ec, NULL, false); in acpi_ec_ecdt_probe()
2061 acpi_ec_free(ec); in acpi_ec_ecdt_probe()
2065 boot_ec = ec; in acpi_ec_ecdt_probe()
2068 pr_info("Boot ECDT EC used to handle transactions\n"); in acpi_ec_ecdt_probe()
2077 struct acpi_ec *ec = in acpi_ec_suspend() local
2081 acpi_ec_disable_event(ec); in acpi_ec_suspend()
2087 struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); in acpi_ec_suspend_noirq() local
2093 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && in acpi_ec_suspend_noirq()
2094 ec->gpe >= 0 && ec->reference_count >= 1) in acpi_ec_suspend_noirq()
2095 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); in acpi_ec_suspend_noirq()
2097 acpi_ec_enter_noirq(ec); in acpi_ec_suspend_noirq()
2104 struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); in acpi_ec_resume_noirq() local
2106 acpi_ec_leave_noirq(ec); in acpi_ec_resume_noirq()
2108 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && in acpi_ec_resume_noirq()
2109 ec->gpe >= 0 && ec->reference_count >= 1) in acpi_ec_resume_noirq()
2110 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); in acpi_ec_resume_noirq()
2117 struct acpi_ec *ec = in acpi_ec_resume() local
2120 acpi_ec_enable_event(ec); in acpi_ec_resume()
2137 static bool acpi_ec_work_in_progress(struct acpi_ec *ec) in acpi_ec_work_in_progress() argument
2139 return ec->events_in_progress + ec->queries_in_progress > 0; in acpi_ec_work_in_progress()
2151 * than the EC one. in acpi_ec_dispatch_gpe()
2160 * Note that if any non-EC GPEs are active at this point, the SCI will in acpi_ec_dispatch_gpe()
2167 * Dispatch the EC GPE in-band, but do not report wakeup in any case in acpi_ec_dispatch_gpe()
2173 pm_pr_dbg("ACPI EC GPE status set\n"); in acpi_ec_dispatch_gpe()
2184 pm_pr_dbg("ACPI EC GPE dispatched\n"); in acpi_ec_dispatch_gpe()
2186 /* Drain EC work. */ in acpi_ec_dispatch_gpe()
2190 pm_pr_dbg("ACPI EC work flushed\n"); in acpi_ec_dispatch_gpe()
2248 .name = "ec",
2316 * Disable EC wakeup on following systems to prevent periodic in acpi_ec_init()
2317 * wakeup from EC GPE. in acpi_ec_init()
2321 pr_debug("Disabling EC wakeup on suspend-to-idle\n"); in acpi_ec_init()
2330 /* EC driver currently not unloadable */