Lines Matching +full:pd +full:- +full:disable
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2020-2021 ARM Ltd.
36 * All user-provided callbacks and allocated notification-chains are stored in
44 * hash-keys.
54 * pushes the event-data itself on a protocol-dedicated kfifo queue for further
59 * queued items into the proper notification-chain: notification processing can
65 * conveyed, converted into a custom per-event report struct, as the void *data
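A client driver sees all of this machinery only through a notifier_block: its callback is invoked from the blocking chain with the customized report passed as the void *data argument. A minimal sketch of such a consumer, assuming the power protocol's state-changed event and the public notify_ops exported through include/linux/scmi_protocol.h (the callback and function names are illustrative):

#include <linux/notifier.h>
#include <linux/scmi_protocol.h>

/* Invoked from the event's blocking notifier chain; the report built by
 * the protocol's fill_custom_report() arrives as the void *data argument.
 */
static int power_state_cb(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct scmi_power_state_changed_report *r = data;

	pr_info("power domain %u -> state 0x%x (agent %u)\n",
		r->domain_id, r->power_state, r->agent_id);
	return NOTIFY_OK;
}

static struct notifier_block power_nb = { .notifier_call = power_state_cb };

/* Registration from a client already holding an SCMI handle; src_id selects
 * one power domain, passing NULL instead would mean "all sources".
 */
static int hook_power_events(const struct scmi_handle *handle, u32 domain)
{
	return handle->notify_ops->event_notifier_register(handle,
					SCMI_PROTOCOL_POWER,
					SCMI_EVENT_POWER_STATE_CHANGED,
					&domain, &power_nb);
}

Since the chains are blocking notifier chains and are walked from the dedicated workqueue, such a callback is allowed to sleep.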
73 #define dev_fmt(fmt) "SCMI Notifications - " fmt
74 #define pr_fmt(fmt) "SCMI Notifications - " fmt
102 #define NOTIF_UNSUPP -1
116 * Assumes that the stored obj includes its own hash-key in a field named 'key':
132 if (obj_->key == k_) \
153 __pd = READ_ONCE(ni_->registered_protocols[(__pid)]); \
163 if (pd_ && eid_ < pd_->num_events) \
164 __revt = READ_ONCE(pd_->registered_events[eid_]); \
182 r->proto->ops->set_notify_enabled(r->proto->ph, \
195 r->proto->ops->fill_custom_report(r->proto->ph, \
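The lookup helpers above index everything by a composite hash key that packs protocol id, event id and source id; MAKE_HASH_KEY()/MAKE_ALL_SRCS_KEY() are then used when dispatching. The exact masks are not shown in this listing, so the following is only a sketch of such packing, assuming an 8/8/16 bit split (the EX_ names are made up):

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Assumed layout: proto_id in bits 31..24, evt_id in 23..16, src_id in 15..0 */
#define EX_PROTO_ID_MASK	GENMASK(31, 24)
#define EX_EVT_ID_MASK		GENMASK(23, 16)
#define EX_SRC_ID_MASK		GENMASK(15, 0)

#define EX_MAKE_HASH_KEY(p, e, s)		\
	(FIELD_PREP(EX_PROTO_ID_MASK, (p)) |	\
	 FIELD_PREP(EX_EVT_ID_MASK, (e)) |	\
	 FIELD_PREP(EX_SRC_ID_MASK, (s)))

/* An all-ones src_id field acts as the wildcard "all sources" key */
#define EX_MAKE_ALL_SRCS_KEY(p, e)	EX_MAKE_HASH_KEY((p), (e), 0xffff)

The wildcard form is what the dispatcher looks up first, so handlers registered for every source of an event are notified before the per-source chain.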
205 * struct scmi_notify_instance - Represents an instance of the notification
213 * all the registered protocol-level specific information
233 * struct events_queue - Describes a queue and its associated worker
249 * struct scmi_event_header - A utility header
269 * struct scmi_registered_events_desc - Protocol Specific information
271 * @ops: Protocol specific and event-related operations
272 * @equeue: The embedded per-protocol events_queue
274 * @eh: A reference to pre-allocated buffer to be used as a scratch area by the
276 * @eh_sz: Size of the pre-allocated buffer @eh
280 * events' descriptors, whose fixed-size is determined at
287 * All protocols that register at least one event have their protocol-specific
294 * we safely grab a NON-NULL reference from the array we can keep it and use it.
313 * struct scmi_registered_event - Event Specific Information
317 * @report: A pre-allocated buffer used by the deferred worker to fill a
330 * safely grab a NON-NULL reference from the table we can keep it and use it.
343 * struct scmi_event_handler - Event handler information
357 * These descriptors are stored in a per-protocol @registered_events_handlers
369 #define IS_HNDL_PENDING(hndl) (!(hndl)->r_evt)
379 * scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it
382 * @report: The customized event-specific report to pass down to the callbacks
402 ret = blocking_notifier_call_chain(&hndl->chain, in scmi_lookup_and_call_event_chain()
412 * scmi_process_event_header() - Dequeue and process an event header
414 * @pd: The protocol descriptor to use
422 * * ERR_PTR(-EINVAL) when NO registered event could be found
427 struct scmi_registered_events_desc *pd) in scmi_process_event_header() argument
432 outs = kfifo_out(&eq->kfifo, pd->eh, in scmi_process_event_header()
437 dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n"); in scmi_process_event_header()
438 kfifo_reset_out(&eq->kfifo); in scmi_process_event_header()
442 r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id); in scmi_process_event_header()
444 r_evt = ERR_PTR(-EINVAL); in scmi_process_event_header()
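The header popped here is accessed throughout the rest of the file as eh->evt_id, eh->timestamp, eh->payld_sz and eh->payld, which suggests a shape roughly like the sketch below (reconstructed from those accesses, not quoted from the file):

#include <linux/ktime.h>

struct scmi_event_header {
	ktime_t timestamp;	/* taken when the event was received */
	size_t payld_sz;	/* length of the payload queued right after */
	unsigned char evt_id;	/* protocol-specific event identifier */
	unsigned char payld[];	/* scratch area filled by the payload dequeue */
};

The payload itself sits on the same kfifo immediately after the header, which is why processing is split into a header step and a payload step with an in-flight event tracked in between.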
450 * scmi_process_event_payload() - Dequeue and process an event payload
452 * @pd: The protocol descriptor to use
464 struct scmi_registered_events_desc *pd, in scmi_process_event_payload() argument
471 outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz); in scmi_process_event_payload()
475 /* Any in-flight event has now been officially processed */ in scmi_process_event_payload()
476 pd->in_flight = NULL; in scmi_process_event_payload()
478 if (outs != pd->eh->payld_sz) { in scmi_process_event_payload()
479 dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n"); in scmi_process_event_payload()
480 kfifo_reset_out(&eq->kfifo); in scmi_process_event_payload()
485 dev_warn(pd->ni->handle->dev, in scmi_process_event_payload()
486 "SKIP UNKNOWN EVT - proto:%X evt:%d\n", in scmi_process_event_payload()
487 pd->id, pd->eh->evt_id); in scmi_process_event_payload()
491 report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp, in scmi_process_event_payload()
492 pd->eh->payld, pd->eh->payld_sz, in scmi_process_event_payload()
493 r_evt->report, &src_id); in scmi_process_event_payload()
495 dev_err(pd->ni->handle->dev, in scmi_process_event_payload()
496 "report not available - proto:%X evt:%d\n", in scmi_process_event_payload()
497 pd->id, pd->eh->evt_id); in scmi_process_event_payload()
502 key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id); in scmi_process_event_payload()
503 scmi_lookup_and_call_event_chain(pd->ni, key, report); in scmi_process_event_payload()
506 key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id); in scmi_process_event_payload()
507 scmi_lookup_and_call_event_chain(pd->ni, key, report); in scmi_process_event_payload()
513 * scmi_events_dispatcher() - Common worker logic for all work items.
520 --> call the related notification chain passing in the report in scmi_events_dispatcher()
522 --> call the related notification chain passing in the report in scmi_events_dispatcher()
525 * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
527 * * each per-protocol queue is associated to a distinct work_item, which
533 * reader/writer on the associated kfifo, so that we can use it lock-less
540 struct scmi_registered_events_desc *pd; in scmi_events_dispatcher() local
544 pd = container_of(eq, struct scmi_registered_events_desc, equeue); in scmi_events_dispatcher()
546 * In order to keep the queue lock-less and the number of memcopies in scmi_events_dispatcher()
548 * possibility of per-protocol in-flight events: i.e. an event whose in scmi_events_dispatcher()
553 if (!pd->in_flight) { in scmi_events_dispatcher()
554 r_evt = scmi_process_event_header(eq, pd); in scmi_events_dispatcher()
557 pd->in_flight = r_evt; in scmi_events_dispatcher()
559 r_evt = pd->in_flight; in scmi_events_dispatcher()
561 } while (scmi_process_event_payload(eq, pd, r_evt)); in scmi_events_dispatcher()
565 * scmi_notify() - Queues a notification for further deferred processing
592 return -EINVAL; in scmi_notify()
594 if (len > r_evt->evt->max_payld_sz) { in scmi_notify()
595 dev_err(handle->dev, "discard badly sized message\n"); in scmi_notify()
596 return -EINVAL; in scmi_notify()
598 if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) { in scmi_notify()
599 dev_warn(handle->dev, in scmi_notify()
602 return -ENOMEM; in scmi_notify()
611 * with in-flight events tracking. in scmi_notify()
613 kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh)); in scmi_notify()
614 kfifo_in(&r_evt->proto->equeue.kfifo, buf, len); in scmi_notify()
619 * - if work was already queued it will simply fail to queue a new one in scmi_notify()
621 * - if work was not queued already it will be now, even in case work in scmi_notify()
627 queue_work(r_evt->proto->equeue.wq, in scmi_notify()
628 &r_evt->proto->equeue.notify_work); in scmi_notify()
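On the producer side, a protocol's RX path is expected to hand the raw notification message over with scmi_notify(), which performs the checks and kfifo insertions shown above. A minimal sketch of such a call site, assuming the scmi_notify() prototype declared in notify.h (handle, proto_id, evt_id, buf, len, timestamp); the xyz_* and MY_EVT_ID names are made up:

/* Hypothetical RX handler feeding the notification core */
static void xyz_rx_event(const struct scmi_handle *handle,
			 const void *payld, size_t payld_sz)
{
	ktime_t ts = ktime_get_boottime();

	/* Queues header + payload on the per-protocol kfifo and kicks the worker */
	if (scmi_notify(handle, SCMI_PROTOCOL_PERF, MY_EVT_ID,
			payld, payld_sz, ts))
		pr_debug("SCMI event dropped\n");
}

The size and free-space checks above are what protect the lock-less kfifo from oversized or overflowing messages before the two kfifo_in() calls.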
634 * scmi_kfifo_free() - Devres action helper to free the kfifo
643 * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer
657 if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL)) in scmi_initialize_events_queue()
658 return -ENOMEM; in scmi_initialize_events_queue()
659 /* Size could have been rounded up to a power of two */ in scmi_initialize_events_queue()
660 equeue->sz = kfifo_size(&equeue->kfifo); in scmi_initialize_events_queue()
662 ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free, in scmi_initialize_events_queue()
663 &equeue->kfifo); in scmi_initialize_events_queue()
667 INIT_WORK(&equeue->notify_work, scmi_events_dispatcher); in scmi_initialize_events_queue()
668 equeue->wq = ni->notify_wq; in scmi_initialize_events_queue()
674 * scmi_allocate_registered_events_desc() - Allocate a registered events'
680 * @eh_sz: Size of the event header scratch area to pre-allocate
698 struct scmi_registered_events_desc *pd; in scmi_allocate_registered_events_desc() local
702 if (WARN_ON(ni->registered_protocols[proto_id])) in scmi_allocate_registered_events_desc()
703 return ERR_PTR(-EINVAL); in scmi_allocate_registered_events_desc()
705 pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL); in scmi_allocate_registered_events_desc()
706 if (!pd) in scmi_allocate_registered_events_desc()
707 return ERR_PTR(-ENOMEM); in scmi_allocate_registered_events_desc()
708 pd->id = proto_id; in scmi_allocate_registered_events_desc()
709 pd->ops = ops; in scmi_allocate_registered_events_desc()
710 pd->ni = ni; in scmi_allocate_registered_events_desc()
712 ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz); in scmi_allocate_registered_events_desc()
716 pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL); in scmi_allocate_registered_events_desc()
717 if (!pd->eh) in scmi_allocate_registered_events_desc()
718 return ERR_PTR(-ENOMEM); in scmi_allocate_registered_events_desc()
719 pd->eh_sz = eh_sz; in scmi_allocate_registered_events_desc()
721 pd->registered_events = devm_kcalloc(ni->handle->dev, num_events, in scmi_allocate_registered_events_desc()
723 if (!pd->registered_events) in scmi_allocate_registered_events_desc()
724 return ERR_PTR(-ENOMEM); in scmi_allocate_registered_events_desc()
725 pd->num_events = num_events; in scmi_allocate_registered_events_desc()
728 mutex_init(&pd->registered_mtx); in scmi_allocate_registered_events_desc()
729 hash_init(pd->registered_events_handlers); in scmi_allocate_registered_events_desc()
731 return pd; in scmi_allocate_registered_events_desc()
735 * scmi_register_protocol_events() - Register Protocol Events with the core
744 * pre-allocate and store all needed descriptors, scratch buffers and event
756 struct scmi_registered_events_desc *pd; in scmi_register_protocol_events() local
760 if (!ee || !ee->ops || !ee->evts || !ph || in scmi_register_protocol_events()
761 (!ee->num_sources && !ee->ops->get_num_sources)) in scmi_register_protocol_events()
762 return -EINVAL; in scmi_register_protocol_events()
766 return -ENOMEM; in scmi_register_protocol_events()
769 if (ee->num_sources) { in scmi_register_protocol_events()
770 num_sources = ee->num_sources; in scmi_register_protocol_events()
772 int nsrc = ee->ops->get_num_sources(ph); in scmi_register_protocol_events()
775 return -EINVAL; in scmi_register_protocol_events()
779 evt = ee->evts; in scmi_register_protocol_events()
780 for (i = 0; i < ee->num_events; i++) in scmi_register_protocol_events()
784 pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz, in scmi_register_protocol_events()
785 payld_sz, ee->num_events, in scmi_register_protocol_events()
786 ee->ops); in scmi_register_protocol_events()
787 if (IS_ERR(pd)) in scmi_register_protocol_events()
788 return PTR_ERR(pd); in scmi_register_protocol_events()
790 pd->ph = ph; in scmi_register_protocol_events()
791 for (i = 0; i < ee->num_events; i++, evt++) { in scmi_register_protocol_events()
795 r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt), in scmi_register_protocol_events()
798 return -ENOMEM; in scmi_register_protocol_events()
799 r_evt->proto = pd; in scmi_register_protocol_events()
800 r_evt->evt = evt; in scmi_register_protocol_events()
802 r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources, in scmi_register_protocol_events()
804 if (!r_evt->sources) in scmi_register_protocol_events()
805 return -ENOMEM; in scmi_register_protocol_events()
806 r_evt->num_sources = num_sources; in scmi_register_protocol_events()
807 mutex_init(&r_evt->sources_mtx); in scmi_register_protocol_events()
809 r_evt->report = devm_kzalloc(ni->handle->dev, in scmi_register_protocol_events()
810 evt->max_report_sz, GFP_KERNEL); in scmi_register_protocol_events()
811 if (!r_evt->report) in scmi_register_protocol_events()
812 return -ENOMEM; in scmi_register_protocol_events()
814 for (id = 0; id < r_evt->num_sources; id++) in scmi_register_protocol_events()
815 if (ee->ops->is_notify_supported && in scmi_register_protocol_events()
816 !ee->ops->is_notify_supported(ph, r_evt->evt->id, id)) in scmi_register_protocol_events()
817 refcount_set(&r_evt->sources[id], NOTIF_UNSUPP); in scmi_register_protocol_events()
819 pd->registered_events[i] = r_evt; in scmi_register_protocol_events()
822 dev_dbg(handle->dev, "registered event - %lX\n", in scmi_register_protocol_events()
823 MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id)); in scmi_register_protocol_events()
827 ni->registered_protocols[proto_id] = pd; in scmi_register_protocol_events()
835 schedule_work(&ni->init_work); in scmi_register_protocol_events()
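The scmi_protocol_events descriptor consumed above (ee->queue_sz, ee->ops, ee->evts, ee->num_events, ee->num_sources) is what each protocol supplies at registration time. The sketch below shows what such a descriptor could look like; the callback prototypes follow the ops-> calls visible in this file and in notify.h, while every xyz_* name and size is a placeholder:

/* Hypothetical per-event report delivered to notifier callbacks */
struct xyz_report { ktime_t timestamp; unsigned int src_id; };

static int xyz_get_num_sources(const struct scmi_protocol_handle *ph)
{
	return 4;	/* e.g. four notification sources (domains) */
}

static int xyz_set_notify_enabled(const struct scmi_protocol_handle *ph,
				  u8 evt_id, u32 src_id, bool enable)
{
	return 0;	/* a real protocol would send its NOTIFY command here */
}

static void *xyz_fill_custom_report(const struct scmi_protocol_handle *ph,
				    u8 evt_id, ktime_t ts, const void *payld,
				    size_t payld_sz, void *report, u32 *src_id)
{
	struct xyz_report *r = report;

	r->timestamp = ts;
	r->src_id = *src_id = 0;	/* decode from payld in a real protocol */
	return r;
}

static const struct scmi_event_ops xyz_event_ops = {
	.get_num_sources = xyz_get_num_sources,	      /* or a static num_sources */
	.set_notify_enabled = xyz_set_notify_enabled,
	.fill_custom_report = xyz_fill_custom_report,
};

static const struct scmi_event xyz_events[] = {
	{
		.id = 0x0,
		.max_payld_sz = 8,	/* raw payload bound, checked in scmi_notify() */
		.max_report_sz = sizeof(struct xyz_report),
	},
};

static const struct scmi_protocol_events xyz_protocol_events = {
	.queue_sz = 4096,	/* per-protocol kfifo size hint */
	.ops = &xyz_event_ops,
	.evts = xyz_events,
	.num_events = ARRAY_SIZE(xyz_events),
};

The optional is_notify_supported() hook, when provided, lets the core mark individual sources as NOTIF_UNSUPP up front, as done in the loop above.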
841 * scmi_deregister_protocol_events() - Deregister protocol events with the core
850 struct scmi_registered_events_desc *pd; in scmi_deregister_protocol_events() local
856 pd = ni->registered_protocols[proto_id]; in scmi_deregister_protocol_events()
857 if (!pd) in scmi_deregister_protocol_events()
860 ni->registered_protocols[proto_id] = NULL; in scmi_deregister_protocol_events()
864 cancel_work_sync(&pd->equeue.notify_work); in scmi_deregister_protocol_events()
868 * scmi_allocate_event_handler() - Allocate Event handler
876 * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
890 hndl->key = evt_key; in scmi_allocate_event_handler()
891 BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain); in scmi_allocate_event_handler()
892 refcount_set(&hndl->users, 1); in scmi_allocate_event_handler()
894 hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key); in scmi_allocate_event_handler()
900 * scmi_free_event_handler() - Free the provided Event handler
908 hash_del(&hndl->hash); in scmi_free_event_handler()
913 * scmi_bind_event_handler() - Helper to attempt binding a handler to an event
929 r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key), in scmi_bind_event_handler()
930 KEY_XTRACT_EVT_ID(hndl->key)); in scmi_bind_event_handler()
932 return -EINVAL; in scmi_bind_event_handler()
938 hash_del(&hndl->hash); in scmi_bind_event_handler()
946 scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key)); in scmi_bind_event_handler()
947 hndl->r_evt = r_evt; in scmi_bind_event_handler()
949 mutex_lock(&r_evt->proto->registered_mtx); in scmi_bind_event_handler()
950 hash_add(r_evt->proto->registered_events_handlers, in scmi_bind_event_handler()
951 &hndl->hash, hndl->key); in scmi_bind_event_handler()
952 mutex_unlock(&r_evt->proto->registered_mtx); in scmi_bind_event_handler()
958 * scmi_valid_pending_handler() - Helper to check pending status of handlers
974 struct scmi_registered_events_desc *pd; in scmi_valid_pending_handler() local
977 return -EINVAL; in scmi_valid_pending_handler()
979 pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key)); in scmi_valid_pending_handler()
980 if (pd) in scmi_valid_pending_handler()
981 return -EINVAL; in scmi_valid_pending_handler()
987 * scmi_register_event_handler() - Register an Event handler whenever possible
1010 dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n", in scmi_register_event_handler()
1011 hndl->key); in scmi_register_event_handler()
1015 dev_dbg(ni->handle->dev, in scmi_register_event_handler()
1016 "registered PENDING handler - key:%X\n", in scmi_register_event_handler()
1017 hndl->key); in scmi_register_event_handler()
1024 * __scmi_event_handler_get_ops() - Utility to get or create an event handler
1030 * Search for the desired handler matching the key in both the per-protocol
1063 mutex_lock(&ni->pending_mtx); in __scmi_event_handler_get_ops()
1066 mutex_lock(&r_evt->proto->registered_mtx); in __scmi_event_handler_get_ops()
1067 hndl = KEY_FIND(r_evt->proto->registered_events_handlers, in __scmi_event_handler_get_ops()
1070 refcount_inc(&hndl->users); in __scmi_event_handler_get_ops()
1071 mutex_unlock(&r_evt->proto->registered_mtx); in __scmi_event_handler_get_ops()
1076 hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key); in __scmi_event_handler_get_ops()
1078 refcount_inc(&hndl->users); in __scmi_event_handler_get_ops()
1085 dev_dbg(ni->handle->dev, in __scmi_event_handler_get_ops()
1086 "purging UNKNOWN handler - key:%X\n", in __scmi_event_handler_get_ops()
1087 hndl->key); in __scmi_event_handler_get_ops()
1093 mutex_unlock(&ni->pending_mtx); in __scmi_event_handler_get_ops()
1111 * scmi_get_active_handler() - Helper to get active handlers only
1115 * Search for the desired handler matching the key only in the per-protocol
1130 mutex_lock(&r_evt->proto->registered_mtx); in scmi_get_active_handler()
1131 hndl = KEY_FIND(r_evt->proto->registered_events_handlers, in scmi_get_active_handler()
1134 refcount_inc(&hndl->users); in scmi_get_active_handler()
1135 mutex_unlock(&r_evt->proto->registered_mtx); in scmi_get_active_handler()
1142 * __scmi_enable_evt() - Enable/disable event generation
1145 * @enable: The action to perform: true->Enable, false->Disable
1147 * Takes care of proper refcounting while performing enable/disable: handles
1163 num_sources = r_evt->num_sources; in __scmi_enable_evt()
1164 } else if (src_id < r_evt->num_sources) { in __scmi_enable_evt()
1167 return -EINVAL; in __scmi_enable_evt()
1170 mutex_lock(&r_evt->sources_mtx); in __scmi_enable_evt()
1172 for (; num_sources; src_id++, num_sources--) { in __scmi_enable_evt()
1175 sid = &r_evt->sources[src_id]; in __scmi_enable_evt()
1177 dev_dbg(r_evt->proto->ph->dev, in __scmi_enable_evt()
1178 "Notification NOT supported - proto_id:%d evt_id:%d src_id:%d", in __scmi_enable_evt()
1179 r_evt->proto->id, r_evt->evt->id, in __scmi_enable_evt()
1181 ret = -EOPNOTSUPP; in __scmi_enable_evt()
1183 ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id, in __scmi_enable_evt()
1193 for (; num_sources; src_id++, num_sources--) { in __scmi_enable_evt()
1194 sid = &r_evt->sources[src_id]; in __scmi_enable_evt()
1199 r_evt->evt->id, src_id); in __scmi_enable_evt()
1203 mutex_unlock(&r_evt->sources_mtx); in __scmi_enable_evt()
1205 return retvals ? 0 : -EINVAL; in __scmi_enable_evt()
1212 if (!hndl->enabled) { in scmi_enable_events()
1213 ret = __scmi_enable_evt(hndl->r_evt, in scmi_enable_events()
1214 KEY_XTRACT_SRC_ID(hndl->key), true); in scmi_enable_events()
1216 hndl->enabled = true; in scmi_enable_events()
1226 if (hndl->enabled) { in scmi_disable_events()
1227 ret = __scmi_enable_evt(hndl->r_evt, in scmi_disable_events()
1228 KEY_XTRACT_SRC_ID(hndl->key), false); in scmi_disable_events()
1230 hndl->enabled = false; in scmi_disable_events()
1237 * scmi_put_handler_unlocked() - Put an event handler
1255 if (refcount_dec_and_test(&hndl->users)) { in scmi_put_handler_unlocked()
1270 struct scmi_registered_event *r_evt = hndl->r_evt; in scmi_put_handler()
1272 mutex_lock(&ni->pending_mtx); in scmi_put_handler()
1274 protocol_id = r_evt->proto->id; in scmi_put_handler()
1275 mutex_lock(&r_evt->proto->registered_mtx); in scmi_put_handler()
1281 mutex_unlock(&r_evt->proto->registered_mtx); in scmi_put_handler()
1285 * releasing a protocol can trigger its de-initialization in scmi_put_handler()
1289 scmi_protocol_release(ni->handle, protocol_id); in scmi_put_handler()
1291 mutex_unlock(&ni->pending_mtx); in scmi_put_handler()
1298 struct scmi_registered_event *r_evt = hndl->r_evt; in scmi_put_active_handler()
1299 u8 protocol_id = r_evt->proto->id; in scmi_put_active_handler()
1301 mutex_lock(&r_evt->proto->registered_mtx); in scmi_put_active_handler()
1303 mutex_unlock(&r_evt->proto->registered_mtx); in scmi_put_active_handler()
1305 scmi_protocol_release(ni->handle, protocol_id); in scmi_put_active_handler()
1309 * scmi_event_handler_enable_events() - Enable events associated with a handler
1317 pr_err("Failed to ENABLE events for key:%X !\n", hndl->key); in scmi_event_handler_enable_events()
1318 return -EINVAL; in scmi_event_handler_enable_events()
1325 * scmi_notifier_register() - Register a notifier_block for an event
1340 * (proto_X, evt_Y, src_Z) --> chain_X_Y_Z
1368 return -ENODEV; in scmi_notifier_register()
1374 return -EINVAL; in scmi_notifier_register()
1376 blocking_notifier_chain_register(&hndl->chain, nb); in scmi_notifier_register()
1389 * scmi_notifier_unregister() - Unregister a notifier_block for an event
1414 return -ENODEV; in scmi_notifier_unregister()
1420 return -EINVAL; in scmi_notifier_unregister()
1426 blocking_notifier_chain_unregister(&hndl->chain, nb); in scmi_notifier_unregister()
1458 scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id, in scmi_devm_release_notifier()
1459 dres->src_id, dres->nb); in scmi_devm_release_notifier()
1463 * scmi_devm_notifier_register() - Managed registration of a notifier_block
1489 return -ENOMEM; in scmi_devm_notifier_register()
1491 ret = scmi_notifier_register(sdev->handle, proto_id, in scmi_devm_notifier_register()
1498 dres->handle = sdev->handle; in scmi_devm_notifier_register()
1499 dres->proto_id = proto_id; in scmi_devm_notifier_register()
1500 dres->evt_id = evt_id; in scmi_devm_notifier_register()
1501 dres->nb = nb; in scmi_devm_notifier_register()
1503 dres->__src_id = *src_id; in scmi_devm_notifier_register()
1504 dres->src_id = &dres->__src_id; in scmi_devm_notifier_register()
1506 dres->src_id = NULL; in scmi_devm_notifier_register()
1508 devres_add(&sdev->dev, dres); in scmi_devm_notifier_register()
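From a client driver's probe routine, this devres-managed path is reached through the handle's notify_ops; a minimal sketch reusing the power_nb notifier from the earlier example (the probe itself is hypothetical):

static int xyz_probe(struct scmi_device *sdev)
{
	const struct scmi_handle *handle = sdev->handle;
	u32 src_id = 0;	/* one hypothetical source; NULL would mean all sources */

	/* Unregistered automatically when sdev goes away, via the devres
	 * release action installed by scmi_devm_notifier_register().
	 */
	return handle->notify_ops->devm_event_notifier_register(sdev,
					SCMI_PROTOCOL_POWER,
					SCMI_EVENT_POWER_STATE_CHANGED,
					&src_id, &power_nb);
}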
1521 return dres->nb == nb; in scmi_devm_notifier_match()
1525 * scmi_devm_notifier_unregister() - Managed un-registration of a
1531 * Generic devres managed helper to explicitly un-register a notifier_block
1542 ret = devres_release(&sdev->dev, scmi_devm_release_notifier, in scmi_devm_notifier_unregister()
1551 * scmi_protocols_late_init() - Worker for late initialization
1572 mutex_lock(&ni->pending_mtx); in scmi_protocols_late_init()
1573 hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) { in scmi_protocols_late_init()
1578 dev_dbg(ni->handle->dev, in scmi_protocols_late_init()
1579 "finalized PENDING handler - key:%X\n", in scmi_protocols_late_init()
1580 hndl->key); in scmi_protocols_late_init()
1583 dev_dbg(ni->handle->dev, in scmi_protocols_late_init()
1584 "purging INVALID handler - key:%X\n", in scmi_protocols_late_init()
1585 hndl->key); in scmi_protocols_late_init()
1591 dev_dbg(ni->handle->dev, in scmi_protocols_late_init()
1592 "purging PENDING handler - key:%X\n", in scmi_protocols_late_init()
1593 hndl->key); in scmi_protocols_late_init()
1599 mutex_unlock(&ni->pending_mtx); in scmi_protocols_late_init()
1614 * scmi_notification_init() - Initializes Notification Core Support
1629 * further per-protocol allocations
1644 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); in scmi_notification_init()
1646 return -ENOMEM; in scmi_notification_init()
1648 ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL); in scmi_notification_init()
1652 ni->gid = gid; in scmi_notification_init()
1653 ni->handle = handle; in scmi_notification_init()
1655 ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO, in scmi_notification_init()
1657 if (!ni->registered_protocols) in scmi_notification_init()
1660 ni->notify_wq = alloc_workqueue(dev_name(handle->dev), in scmi_notification_init()
1663 if (!ni->notify_wq) in scmi_notification_init()
1666 mutex_init(&ni->pending_mtx); in scmi_notification_init()
1667 hash_init(ni->pending_events_handlers); in scmi_notification_init()
1669 INIT_WORK(&ni->init_work, scmi_protocols_late_init); in scmi_notification_init()
1672 handle->notify_ops = ¬ify_ops; in scmi_notification_init()
1676 dev_info(handle->dev, "Core Enabled.\n"); in scmi_notification_init()
1678 devres_close_group(handle->dev, ni->gid); in scmi_notification_init()
1683 dev_warn(handle->dev, "Initialization Failed.\n"); in scmi_notification_init()
1684 devres_release_group(handle->dev, gid); in scmi_notification_init()
1685 return -ENOMEM; in scmi_notification_init()
1689 * scmi_notification_exit() - Shutdown and clean Notification core
1702 destroy_workqueue(ni->notify_wq); in scmi_notification_exit()
1704 devres_release_group(ni->handle->dev, ni->gid); in scmi_notification_exit()