Lines matching refs: sg_policy (kernel/sched/cpufreq_schedutil.c, the schedutil cpufreq governor)

42 	struct sugov_policy	*sg_policy;  member
62 static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) in sugov_should_update_freq() argument
81 if (!cpufreq_this_cpu_can_update(sg_policy->policy)) in sugov_should_update_freq()
84 if (unlikely(sg_policy->limits_changed)) { in sugov_should_update_freq()
85 sg_policy->limits_changed = false; in sugov_should_update_freq()
86 sg_policy->need_freq_update = true; in sugov_should_update_freq()
90 delta_ns = time - sg_policy->last_freq_update_time; in sugov_should_update_freq()
92 return delta_ns >= sg_policy->freq_update_delay_ns; in sugov_should_update_freq()
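
A minimal sketch of how the sugov_should_update_freq() hits above (lines 62-92) fit together, assuming the surrounding code matches mainline kernel/sched/cpufreq_schedutil.c; the early returns and comments between the quoted lines are inferred, not quoted:

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/* Remote updates are only honoured if the driver can act on them. */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	/* A limits change bypasses the rate limit and forces a re-evaluation. */
	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;
		return true;
	}

	/* Otherwise honour the rate_limit_us throttle. */
	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->freq_update_delay_ns;
}

The limits_changed branch is what lets sugov_limits() (line 892 below) trigger an immediate frequency update regardless of how recently the last one happened.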
95 static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time, in sugov_update_next_freq() argument
98 if (sg_policy->need_freq_update) in sugov_update_next_freq()
99 sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS); in sugov_update_next_freq()
100 else if (sg_policy->next_freq == next_freq) in sugov_update_next_freq()
103 sg_policy->next_freq = next_freq; in sugov_update_next_freq()
104 sg_policy->last_freq_update_time = time; in sugov_update_next_freq()
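
The matching commit side, sketched under the same assumption; the two return statements are inferred from the callers at lines 415 and 502, which skip the switch when this helper returns false:

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->need_freq_update)
		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
	else if (sg_policy->next_freq == next_freq)
		return false;	/* same frequency and no forced update: nothing to do */

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}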
109 static void sugov_deferred_update(struct sugov_policy *sg_policy) in sugov_deferred_update() argument
111 if (!sg_policy->work_in_progress) { in sugov_deferred_update()
112 sg_policy->work_in_progress = true; in sugov_deferred_update()
113 irq_work_queue(&sg_policy->irq_work); in sugov_deferred_update()
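
Lines 109-113 are essentially the whole slow-path trigger. A sketch, assuming mainline field names; work_in_progress keeps at most one irq_work in flight per policy:

static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		/* Hand off to sugov_irq_work(), which queues the worker kthread. */
		irq_work_queue(&sg_policy->irq_work);
	}
}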
165 static unsigned int get_next_freq(struct sugov_policy *sg_policy, in get_next_freq() argument
168 struct cpufreq_policy *policy = sg_policy->policy; in get_next_freq()
174 if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update) in get_next_freq()
175 return sg_policy->next_freq; in get_next_freq()
177 sg_policy->cached_raw_freq = freq; in get_next_freq()
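
A sketch of get_next_freq() around the quoted caching lines (174-177). How the reference frequency is chosen and where the roughly 25% utilization headroom is applied have moved between kernel versions, so treat the first half as an approximation rather than a quote; arch_scale_freq_invariant(), map_util_freq() and cpufreq_driver_resolve_freq() are existing kernel helpers:

static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	/* Roughly: next_freq = reference_freq * util / max, plus headroom. */
	freq = map_util_freq(util, freq, max);

	/* Skip the driver's table lookup if the raw value has not moved. */
	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

cached_raw_freq stores the unresolved value, which is why sugov_update_single_freq() restores it (line 412 below) when it decides to keep the previous frequency.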
368 sg_cpu->sg_policy->limits_changed = true; in ignore_dl_rate_limit()
382 if (!sugov_should_update_freq(sg_cpu->sg_policy, time)) in sugov_update_single_common()
395 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_single_freq() local
396 unsigned int cached_freq = sg_policy->cached_raw_freq; in sugov_update_single_freq()
405 next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap); in sugov_update_single_freq()
407 if (sugov_hold_freq(sg_cpu) && next_f < sg_policy->next_freq && in sugov_update_single_freq()
408 !sg_policy->need_freq_update) { in sugov_update_single_freq()
409 next_f = sg_policy->next_freq; in sugov_update_single_freq()
412 sg_policy->cached_raw_freq = cached_freq; in sugov_update_single_freq()
415 if (!sugov_update_next_freq(sg_policy, time, next_f)) in sugov_update_single_freq()
423 if (sg_policy->policy->fast_switch_enabled) { in sugov_update_single_freq()
424 cpufreq_driver_fast_switch(sg_policy->policy, next_f); in sugov_update_single_freq()
426 raw_spin_lock(&sg_policy->update_lock); in sugov_update_single_freq()
427 sugov_deferred_update(sg_policy); in sugov_update_single_freq()
428 raw_spin_unlock(&sg_policy->update_lock); in sugov_update_single_freq()
460 sg_cpu->sg_policy->last_freq_update_time = time; in sugov_update_single_perf()
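
How the single-CPU frequency path (lines 395-428) uses the helpers above, sketched assuming the file matches mainline; sugov_update_single_common() (line 382) and sugov_hold_freq() do the utilization aggregation and the "recently busy" check, and the declarations between the quoted lines are inferred:

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int cached_freq = sg_policy->cached_raw_freq;
	unsigned long max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
	unsigned int next_f;

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);

	/* Keep the previous (higher) frequency while the CPU was recently busy. */
	if (sugov_hold_freq(sg_cpu) && next_f < sg_policy->next_freq &&
	    !sg_policy->need_freq_update) {
		next_f = sg_policy->next_freq;
		/* Restore cached_raw_freq, get_next_freq() overwrote it. */
		sg_policy->cached_raw_freq = cached_freq;
	}

	if (!sugov_update_next_freq(sg_policy, time, next_f))
		return;

	/*
	 * Runs under the target CPU's rq lock: the fast-switch path needs no
	 * extra locking, the deferred path takes update_lock around the queue.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

sugov_update_single_perf() (line 460) is the variant for drivers with a direct performance hook; in mainline it hands the utilization straight to the driver (cpufreq_driver_adjust_perf()) and the governor only records last_freq_update_time.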
465 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_next_freq_shared() local
466 struct cpufreq_policy *policy = sg_policy->policy; in sugov_next_freq_shared()
482 return get_next_freq(sg_policy, util, max_cap); in sugov_next_freq_shared()
489 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_shared() local
492 raw_spin_lock(&sg_policy->update_lock); in sugov_update_shared()
499 if (sugov_should_update_freq(sg_policy, time)) { in sugov_update_shared()
502 if (!sugov_update_next_freq(sg_policy, time, next_f)) in sugov_update_shared()
505 if (sg_policy->policy->fast_switch_enabled) in sugov_update_shared()
506 cpufreq_driver_fast_switch(sg_policy->policy, next_f); in sugov_update_shared()
508 sugov_deferred_update(sg_policy); in sugov_update_shared()
511 raw_spin_unlock(&sg_policy->update_lock); in sugov_update_shared()
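
The shared-policy path (lines 489-511), sketched with the same caveat; the iowait-boost bookkeeping between the quoted lines is taken from mainline, and everything runs under update_lock because any CPU in the policy may trigger the update:

static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (sugov_should_update_freq(sg_policy, time)) {
		/* Aggregate utilization over all CPUs of the policy (line 465 ff.). */
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (!sugov_update_next_freq(sg_policy, time, next_f))
			goto unlock;

		if (sg_policy->policy->fast_switch_enabled)
			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
		else
			sugov_deferred_update(sg_policy);
	}
unlock:
	raw_spin_unlock(&sg_policy->update_lock);
}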
516 struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work); in sugov_work() local
530 raw_spin_lock_irqsave(&sg_policy->update_lock, flags); in sugov_work()
531 freq = sg_policy->next_freq; in sugov_work()
532 sg_policy->work_in_progress = false; in sugov_work()
533 raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags); in sugov_work()
535 mutex_lock(&sg_policy->work_lock); in sugov_work()
536 __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L); in sugov_work()
537 mutex_unlock(&sg_policy->work_lock); in sugov_work()
542 struct sugov_policy *sg_policy; in sugov_irq_work() local
544 sg_policy = container_of(irq_work, struct sugov_policy, irq_work); in sugov_irq_work()
546 kthread_queue_work(&sg_policy->worker, &sg_policy->work); in sugov_irq_work()
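
The deferred (non-fast-switch) pipeline from the two groups above (lines 516-546), sketched assuming mainline: the irq_work runs in hard-irq context and only queues a kthread work item, and the kthread then performs the sleeping driver call.

/* irq_work handler: hard-irq context, only bounces to the worker kthread. */
static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/* kthread worker: may sleep, so it can use the slow cpufreq driver path. */
static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Snapshot next_freq and clear work_in_progress under update_lock: a
	 * racing sugov_deferred_update() then either publishes its new
	 * next_freq before the snapshot is taken, or finds work_in_progress
	 * already cleared and queues another irq_work.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}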
570 struct sugov_policy *sg_policy; in rate_limit_us_store() local
578 list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) in rate_limit_us_store()
579 sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC; in rate_limit_us_store()
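
A sketch of the sysfs store handler around lines 570-579; the to_sugov_tunables() cast and the kstrtouint() parse are reconstructed from mainline rather than quoted:

static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set,
				   const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	/* One tunables set may be shared by several policies; update them all. */
	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

The loop is why a single write to rate_limit_us takes effect on every policy hooked into the shared tunables (see the global_tunables handling in sugov_init() below, lines 762-765).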
636 struct sugov_policy *sg_policy; in sugov_policy_alloc() local
638 sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL); in sugov_policy_alloc()
639 if (!sg_policy) in sugov_policy_alloc()
642 sg_policy->policy = policy; in sugov_policy_alloc()
643 raw_spin_lock_init(&sg_policy->update_lock); in sugov_policy_alloc()
644 return sg_policy; in sugov_policy_alloc()
647 static void sugov_policy_free(struct sugov_policy *sg_policy) in sugov_policy_free() argument
649 kfree(sg_policy); in sugov_policy_free()
652 static int sugov_kthread_create(struct sugov_policy *sg_policy) in sugov_kthread_create() argument
669 struct cpufreq_policy *policy = sg_policy->policy; in sugov_kthread_create()
676 kthread_init_work(&sg_policy->work, sugov_work); in sugov_kthread_create()
677 kthread_init_worker(&sg_policy->worker); in sugov_kthread_create()
678 thread = kthread_create(kthread_worker_fn, &sg_policy->worker, in sugov_kthread_create()
693 sg_policy->thread = thread; in sugov_kthread_create()
695 init_irq_work(&sg_policy->irq_work, sugov_irq_work); in sugov_kthread_create()
696 mutex_init(&sg_policy->work_lock); in sugov_kthread_create()
703 static void sugov_kthread_stop(struct sugov_policy *sg_policy) in sugov_kthread_stop() argument
706 if (sg_policy->policy->fast_switch_enabled) in sugov_kthread_stop()
709 kthread_flush_worker(&sg_policy->worker); in sugov_kthread_stop()
710 kthread_stop(sg_policy->thread); in sugov_kthread_stop()
711 mutex_destroy(&sg_policy->work_lock); in sugov_kthread_stop()
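
The worker-thread lifecycle behind those hits (lines 652-711), sketched with the error handling, CPU-mask binding and SCHED_DEADLINE setup that mainline performs condensed into comments; both functions bail out early when the driver fast-switches, since the kthread is only needed for the slow path:

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	struct task_struct *thread;

	/* The kthread is only required for the slow (deferred) update path. */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d", cpumask_first(policy->related_cpus));
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	/* Mainline also binds the thread to related_cpus and gives it
	 * SCHED_DEADLINE parameters here; elided in this sketch. */

	sg_policy->thread = thread;
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);
	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}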
714 static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy) in sugov_tunables_alloc() argument
720 gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook); in sugov_tunables_alloc()
735 struct sugov_policy *sg_policy; in sugov_init() local
745 sg_policy = sugov_policy_alloc(policy); in sugov_init()
746 if (!sg_policy) { in sugov_init()
751 ret = sugov_kthread_create(sg_policy); in sugov_init()
762 policy->governor_data = sg_policy; in sugov_init()
763 sg_policy->tunables = global_tunables; in sugov_init()
765 gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook); in sugov_init()
769 tunables = sugov_tunables_alloc(sg_policy); in sugov_init()
777 policy->governor_data = sg_policy; in sugov_init()
778 sg_policy->tunables = tunables; in sugov_init()
798 sugov_kthread_stop(sg_policy); in sugov_init()
802 sugov_policy_free(sg_policy); in sugov_init()
813 struct sugov_policy *sg_policy = policy->governor_data; in sugov_exit() local
814 struct sugov_tunables *tunables = sg_policy->tunables; in sugov_exit()
819 count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook); in sugov_exit()
826 sugov_kthread_stop(sg_policy); in sugov_exit()
827 sugov_policy_free(sg_policy); in sugov_exit()
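
Teardown, sketched from the hits at lines 813-827; the refcounted tunables handling under global_tunables_lock is reconstructed from mainline, and the helper that frees the last tunables instance is named sugov_clear_global_tunables() in recent kernels (earlier sugov_tunables_free()), so take that name as an assumption:

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	/* Drop our reference; free the tunables only if we were the last user. */
	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_clear_global_tunables();

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}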
835 struct sugov_policy *sg_policy = policy->governor_data; in sugov_start() local
839 sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC; in sugov_start()
840 sg_policy->last_freq_update_time = 0; in sugov_start()
841 sg_policy->next_freq = 0; in sugov_start()
842 sg_policy->work_in_progress = false; in sugov_start()
843 sg_policy->limits_changed = false; in sugov_start()
844 sg_policy->cached_raw_freq = 0; in sugov_start()
846 sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS); in sugov_start()
860 sg_cpu->sg_policy = sg_policy; in sugov_start()
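
sugov_start() (lines 835-860) resets the per-policy state quoted above and then wires one of the three update callbacks into every CPU of the policy. A sketch assuming mainline helper names (policy_is_shared(), cpufreq_driver_has_adjust_perf(), cpufreq_add_update_util_hook()):

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = 0;
	sg_policy->work_in_progress = false;
	sg_policy->limits_changed = false;
	sg_policy->cached_raw_freq = 0;

	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	/* Pick the update path matching the policy and driver capabilities. */
	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;	/* line 860: back-pointer used by every update path */
		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
	}
	return 0;
}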
868 struct sugov_policy *sg_policy = policy->governor_data; in sugov_stop() local
877 irq_work_sync(&sg_policy->irq_work); in sugov_stop()
878 kthread_cancel_work_sync(&sg_policy->work); in sugov_stop()
884 struct sugov_policy *sg_policy = policy->governor_data; in sugov_limits() local
887 mutex_lock(&sg_policy->work_lock); in sugov_limits()
889 mutex_unlock(&sg_policy->work_lock); in sugov_limits()
892 sg_policy->limits_changed = true; in sugov_limits()
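
Finally, the limits callback (lines 884-892). The cpufreq_policy_apply_limits() call inside the work_lock section is reconstructed from mainline, and newer kernels set limits_changed with WRITE_ONCE() plus a memory barrier; the flag is what sugov_should_update_freq() picks up at line 84 to force a frequency re-evaluation:

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	/* Picked up by sugov_should_update_freq() on the next scheduler update. */
	sg_policy->limits_changed = true;
}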