Lines Matching +full:pressure +full:- +full:min

1 // SPDX-License-Identifier: GPL-2.0
3 * Pressure stall information for CPU, memory and IO
16 * This code aggregates individual task delays into resource pressure
23 * productivity. Pressure expresses the amount of time in which this
27 * the CPU. To measure the impact of pressure on both, we define two
34 * In the FULL state of a given resource, all non-idle tasks are
44 * CPU, productive means an on-CPU task.
48 * FULL means all non-idle tasks in the cgroup are delayed on the CPU
53 * states gives pressure numbers between 0 and 100 for each resource,
64 * unrealized due to resource contention *also* scales with non-idle
69 * states, we would have to conclude a CPU SOME pressure number of
78 * pressure number of 0%, since *somebody* is always making forward
82 * To calculate wasted potential (pressure) with multiple processors,
83 * we have to base our calculation on the number of non-idle tasks in
89 * threads = min(nr_nonidle_tasks, nr_cpus)
90 * SOME = min(nr_delayed_tasks / threads, 1)
91 * FULL = (threads - min(nr_productive_tasks, threads)) / threads
95 * threads = min(257, 256)
96 * SOME = min(1 / 256, 1) = 0.4%
97 * FULL = (256 - min(256, 256)) / 256 = 0%
99 * For the 1 out of 4 memory-delayed tasks, this yields:
101 * threads = min(4, 4)
102 * SOME = min(1 / 4, 1) = 25%
103 * FULL = (4 - min(3, 4)) / 4 = 25%
106 * extension of the single-CPU model. ]
135 * This gives us an approximation of pressure that is practical
136 * cost-wise, yet way more sensitive and accurate than periodic
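As an illustration of the SOME/FULL formulas and the two worked examples above, the model is easy to reproduce outside the kernel. A minimal user-space sketch (illustrative helper names, not kernel code):

    #include <stdio.h>

    /*
     * Mirrors the model:
     *   threads = min(nr_nonidle_tasks, nr_cpus)
     *   SOME    = min(nr_delayed_tasks / threads, 1)
     *   FULL    = (threads - min(nr_productive_tasks, threads)) / threads
     */
    static void psi_model(double nonidle, double delayed, double productive, double cpus)
    {
        double threads = nonidle < cpus ? nonidle : cpus;
        double some = delayed / threads;
        double prod = productive < threads ? productive : threads;

        if (some > 1.0)
            some = 1.0;
        printf("SOME = %.1f%%  FULL = %.1f%%\n",
               some * 100.0, (threads - prod) / threads * 100.0);
    }

    int main(void)
    {
        psi_model(257, 1, 256, 256); /* 257 number crunchers on 256 CPUs: 0.4% / 0% */
        psi_model(4, 1, 3, 4);       /* 1 of 4 tasks delayed on memory:  25% / 25% */
        return 0;
    }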
156 /* Running averages - we need to be higher-res than loadavg */
158 #define EXP_10s 1677 /* 1/exp(2s/10s) as fixed-point */
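The decay constants follow the loadavg fixed-point convention (assuming FIXED_1 = 1 << 11 = 2048): for the 10 s average sampled every 2 s, 1/exp(2s/10s) = e^-0.2 ≈ 0.8187, and 0.8187 * 2048 ≈ 1677, which is the EXP_10s value above; the 60 s and 300 s constants are derived the same way.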
169 /* System-level pressure and stall tracking */
183 group->enabled = true; in group_init()
185 seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq); in group_init()
186 group->avg_last_update = sched_clock(); in group_init()
187 group->avg_next_update = group->avg_last_update + psi_period; in group_init()
188 mutex_init(&group->avgs_lock); in group_init()
190 /* Init avg trigger-related members */ in group_init()
191 INIT_LIST_HEAD(&group->avg_triggers); in group_init()
192 memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers)); in group_init()
193 INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work); in group_init()
195 /* Init rtpoll trigger-related members */ in group_init()
196 atomic_set(&group->rtpoll_scheduled, 0); in group_init()
197 mutex_init(&group->rtpoll_trigger_lock); in group_init()
198 INIT_LIST_HEAD(&group->rtpoll_triggers); in group_init()
199 group->rtpoll_min_period = U32_MAX; in group_init()
200 group->rtpoll_next_update = ULLONG_MAX; in group_init()
201 init_waitqueue_head(&group->rtpoll_wait); in group_init()
202 timer_setup(&group->rtpoll_timer, poll_timer_fn, 0); in group_init()
203 rcu_assign_pointer(group->rtpoll_task, NULL); in group_init()
253 struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu); in get_recent_times()
265 seq = read_seqcount_begin(&groupc->seq); in get_recent_times()
267 memcpy(times, groupc->times, sizeof(groupc->times)); in get_recent_times()
268 state_mask = groupc->state_mask; in get_recent_times()
269 state_start = groupc->state_start; in get_recent_times()
271 memcpy(tasks, groupc->tasks, sizeof(groupc->tasks)); in get_recent_times()
272 } while (read_seqcount_retry(&groupc->seq, seq)); in get_recent_times()
283 * (u32) and our reported pressure close to what's in get_recent_times()
287 times[s] += now - state_start; in get_recent_times()
289 delta = times[s] - groupc->times_prev[aggregator][s]; in get_recent_times()
290 groupc->times_prev[aggregator][s] = times[s]; in get_recent_times()
299 * re-arm avgs_work when all CPUs are IDLE. But the current CPU running in get_recent_times()
301 * So for the current CPU, we need to re-arm avgs_work only when in get_recent_times()
305 if (current_work() == &group->avgs_work.work) { in get_recent_times()
344 u64 deltas[NR_PSI_STATES - 1] = { 0, }; in collect_percpu_times()
351 * Collect the per-cpu time buckets and average them into a in collect_percpu_times()
354 * For averaging, each CPU is weighted by its non-idle time in in collect_percpu_times()
379 * Pressure percentages are sampled at PSI_FREQ. We might be in collect_percpu_times()
387 for (s = 0; s < NR_PSI_STATES - 1; s++) in collect_percpu_times()
388 group->total[aggregator][s] += in collect_percpu_times()
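The weighting that the comment above describes can be sketched in plain C (a simplified stand-in for the per-CPU loop, with illustrative names):

    /*
     * Average per-CPU stall deltas, weighting each CPU by its non-idle
     * time so that idle CPUs do not dilute the pressure of busy ones.
     */
    static unsigned long long weighted_stall(const unsigned int *delta,
                                             const unsigned int *nonidle,
                                             int nr_cpus)
    {
        unsigned long long num = 0, den = 0;
        int cpu;

        for (cpu = 0; cpu < nr_cpus; cpu++) {
            num += (unsigned long long)delta[cpu] * nonidle[cpu];
            den += nonidle[cpu];
        }
        return den ? num / den : 0;
    }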
399 win->start_time = now; in window_reset()
400 win->start_value = value; in window_reset()
401 win->prev_growth = prev_growth; in window_reset()
420 elapsed = now - win->start_time; in window_update()
421 growth = value - win->start_value; in window_update()
423 * After each tracking window passes win->start_value and in window_update()
424 * win->start_time get reset and win->prev_growth stores in window_update()
425 * the average per-window growth of the previous window. in window_update()
426 * win->prev_growth is then used to interpolate additional in window_update()
429 if (elapsed > win->size) in window_update()
434 remaining = win->size - elapsed; in window_update()
435 growth += div64_u64(win->prev_growth * remaining, win->size); in window_update()
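As a worked example of the interpolation: with win->size = 1 s, elapsed = 750 ms and prev_growth = 100 ms of stall, remaining is 250 ms, so the interpolated term is 100 ms * 250 / 1000 = 25 ms, added on top of whatever growth the current window has accumulated so far.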
445 u64 *total = group->total[aggregator]; in update_triggers()
450 triggers = &group->avg_triggers; in update_triggers()
451 aggregator_total = group->avg_total; in update_triggers()
453 triggers = &group->rtpoll_triggers; in update_triggers()
454 aggregator_total = group->rtpoll_total; in update_triggers()
465 new_stall = aggregator_total[t->state] != total[t->state]; in update_triggers()
468 if (!new_stall && !t->pending_event) in update_triggers()
478 growth = window_update(&t->win, now, total[t->state]); in update_triggers()
479 if (!t->pending_event) { in update_triggers()
480 if (growth < t->threshold) in update_triggers()
483 t->pending_event = true; in update_triggers()
487 if (now < t->last_event_time + t->win.size) in update_triggers()
491 if (cmpxchg(&t->event, 0, 1) == 0) { in update_triggers()
492 if (t->of) in update_triggers()
493 kernfs_notify(t->of->kn); in update_triggers()
495 wake_up_interruptible(&t->event_wait); in update_triggers()
497 t->last_event_time = now; in update_triggers()
499 t->pending_event = false; in update_triggers()
511 expires = group->avg_next_update; in update_averages()
512 if (now - expires >= psi_period) in update_averages()
513 missed_periods = div_u64(now - expires, psi_period); in update_averages()
519 * But the deltas we sample out of the per-cpu buckets above in update_averages()
523 period = now - (group->avg_last_update + (missed_periods * psi_period)); in update_averages()
524 group->avg_last_update = now; in update_averages()
526 for (s = 0; s < NR_PSI_STATES - 1; s++) { in update_averages()
529 sample = group->total[PSI_AVGS][s] - group->avg_total[s]; in update_averages()
533 * which under full pressure can result in samples in in update_averages()
536 * We don't want to report nonsensical pressures in in update_averages()
539 * future until pressure subsides. By doing this we in update_averages()
540 * don't underreport the occurring pressure curve, we in update_averages()
549 group->avg_total[s] += sample; in update_averages()
550 calc_avgs(group->avg[s], missed_periods, sample, period); in update_averages()
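calc_avgs() folds each period's stall percentage into the 10 s/60 s/300 s averages using the same fixed-point exponential decay as the load average. A stripped-down sketch of a single step (not the kernel implementation):

    #define FSHIFT  11
    #define FIXED_1 (1UL << FSHIFT)        /* 1.0 in fixed point */
    #define EXP_10s 1677                   /* 1/exp(2s/10s) * FIXED_1 */

    /*
     * Decay the old average toward the new sample; 'pct' is the stall
     * percentage of the last period, scaled by FIXED_1.
     */
    static unsigned long ema_step(unsigned long avg, unsigned long exp, unsigned long pct)
    {
        return (avg * exp + pct * (FIXED_1 - exp)) >> FSHIFT;
    }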
566 mutex_lock(&group->avgs_lock); in psi_avgs_work()
572 * If there is task activity, periodically fold the per-cpu in psi_avgs_work()
576 * go - see calc_avgs() and missed_periods. in psi_avgs_work()
578 if (now >= group->avg_next_update) { in psi_avgs_work()
580 group->avg_next_update = update_averages(group, now); in psi_avgs_work()
585 group->avg_next_update - now) + 1); in psi_avgs_work()
588 mutex_unlock(&group->avgs_lock); in psi_avgs_work()
595 list_for_each_entry(t, &group->rtpoll_triggers, node) in init_rtpoll_triggers()
596 window_reset(&t->win, now, in init_rtpoll_triggers()
597 group->total[PSI_POLL][t->state], 0); in init_rtpoll_triggers()
598 memcpy(group->rtpoll_total, group->total[PSI_POLL], in init_rtpoll_triggers()
599 sizeof(group->rtpoll_total)); in init_rtpoll_triggers()
600 group->rtpoll_next_update = now + group->rtpoll_min_period; in init_rtpoll_triggers()
613 if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force) in psi_schedule_rtpoll_work()
618 task = rcu_dereference(group->rtpoll_task); in psi_schedule_rtpoll_work()
624 mod_timer(&group->rtpoll_timer, jiffies + delay); in psi_schedule_rtpoll_work()
626 atomic_set(&group->rtpoll_scheduled, 0); in psi_schedule_rtpoll_work()
637 mutex_lock(&group->rtpoll_trigger_lock); in psi_rtpoll_work()
641 if (now > group->rtpoll_until) { in psi_rtpoll_work()
651 atomic_set(&group->rtpoll_scheduled, 0); in psi_rtpoll_work()
680 if (changed_states & group->rtpoll_states) { in psi_rtpoll_work()
682 if (now > group->rtpoll_until) in psi_rtpoll_work()
690 group->rtpoll_until = now + in psi_rtpoll_work()
691 group->rtpoll_min_period * UPDATES_PER_WINDOW; in psi_rtpoll_work()
694 if (now > group->rtpoll_until) { in psi_rtpoll_work()
695 group->rtpoll_next_update = ULLONG_MAX; in psi_rtpoll_work()
699 if (now >= group->rtpoll_next_update) { in psi_rtpoll_work()
700 if (changed_states & group->rtpoll_states) { in psi_rtpoll_work()
702 memcpy(group->rtpoll_total, group->total[PSI_POLL], in psi_rtpoll_work()
703 sizeof(group->rtpoll_total)); in psi_rtpoll_work()
705 group->rtpoll_next_update = now + group->rtpoll_min_period; in psi_rtpoll_work()
709 nsecs_to_jiffies(group->rtpoll_next_update - now) + 1, in psi_rtpoll_work()
713 mutex_unlock(&group->rtpoll_trigger_lock); in psi_rtpoll_work()
723 wait_event_interruptible(group->rtpoll_wait, in psi_rtpoll_worker()
724 atomic_cmpxchg(&group->rtpoll_wakeup, 1, 0) || in psi_rtpoll_worker()
738 atomic_set(&group->rtpoll_wakeup, 1); in poll_timer_fn()
739 wake_up_interruptible(&group->rtpoll_wait); in poll_timer_fn()
746 delta = now - groupc->state_start; in record_times()
747 groupc->state_start = now; in record_times()
749 if (groupc->state_mask & (1 << PSI_IO_SOME)) { in record_times()
750 groupc->times[PSI_IO_SOME] += delta; in record_times()
751 if (groupc->state_mask & (1 << PSI_IO_FULL)) in record_times()
752 groupc->times[PSI_IO_FULL] += delta; in record_times()
755 if (groupc->state_mask & (1 << PSI_MEM_SOME)) { in record_times()
756 groupc->times[PSI_MEM_SOME] += delta; in record_times()
757 if (groupc->state_mask & (1 << PSI_MEM_FULL)) in record_times()
758 groupc->times[PSI_MEM_FULL] += delta; in record_times()
761 if (groupc->state_mask & (1 << PSI_CPU_SOME)) { in record_times()
762 groupc->times[PSI_CPU_SOME] += delta; in record_times()
763 if (groupc->state_mask & (1 << PSI_CPU_FULL)) in record_times()
764 groupc->times[PSI_CPU_FULL] += delta; in record_times()
767 if (groupc->state_mask & (1 << PSI_NONIDLE)) in record_times()
768 groupc->times[PSI_NONIDLE] += delta; in record_times()
781 groupc = per_cpu_ptr(group->pcpu, cpu); in psi_group_change()
792 write_seqcount_begin(&groupc->seq); in psi_group_change()
797 * task count - it's just a boolean flag directly encoded in in psi_group_change()
808 state_mask = groupc->state_mask & PSI_ONCPU; in psi_group_change()
818 if (groupc->tasks[t]) { in psi_group_change()
819 groupc->tasks[t]--; in psi_group_change()
822 cpu, t, groupc->tasks[0], in psi_group_change()
823 groupc->tasks[1], groupc->tasks[2], in psi_group_change()
824 groupc->tasks[3], clear, set); in psi_group_change()
831 groupc->tasks[t]++; in psi_group_change()
833 if (!group->enabled) { in psi_group_change()
839 * avoid a delta sample underflow when PSI is later re-enabled. in psi_group_change()
841 if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE))) in psi_group_change()
844 groupc->state_mask = state_mask; in psi_group_change()
846 write_seqcount_end(&groupc->seq); in psi_group_change()
850 state_mask = test_states(groupc->tasks, state_mask); in psi_group_change()
860 if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall)) in psi_group_change()
865 groupc->state_mask = state_mask; in psi_group_change()
867 write_seqcount_end(&groupc->seq); in psi_group_change()
869 if (state_mask & group->rtpoll_states) in psi_group_change()
872 if (wake_clock && !delayed_work_pending(&group->avgs_work)) in psi_group_change()
873 schedule_delayed_work(&group->avgs_work, PSI_FREQ); in psi_group_change()
887 if (((task->psi_flags & set) || in psi_flags_change()
888 (task->psi_flags & clear) != clear) && in psi_flags_change()
891 task->pid, task->comm, task_cpu(task), in psi_flags_change()
892 task->psi_flags, clear, set); in psi_flags_change()
896 task->psi_flags &= ~clear; in psi_flags_change()
897 task->psi_flags |= set; in psi_flags_change()
905 if (!task->pid) in psi_task_change()
913 } while ((group = group->parent)); in psi_task_change()
922 if (next->pid) { in psi_task_switch()
931 if (per_cpu_ptr(group->pcpu, cpu)->state_mask & in psi_task_switch()
938 } while ((group = group->parent)); in psi_task_switch()
941 if (prev->pid) { in psi_task_switch()
953 if (prev->in_memstall) in psi_task_switch()
955 if (prev->in_iowait) in psi_task_switch()
962 * itself going to sleep, or we'll ping-pong forever. in psi_task_switch()
964 if (unlikely((prev->flags & PF_WQ_WORKER) && in psi_task_switch()
976 } while ((group = group->parent)); in psi_task_switch()
984 if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) { in psi_task_switch()
986 for (; group; group = group->parent) in psi_task_switch()
1004 if (!curr->pid) in psi_account_irqtime()
1013 delta = (s64)(irq - rq->psi_irq_time); in psi_account_irqtime()
1016 rq->psi_irq_time = irq; in psi_account_irqtime()
1021 if (!group->enabled) in psi_account_irqtime()
1024 groupc = per_cpu_ptr(group->pcpu, cpu); in psi_account_irqtime()
1026 write_seqcount_begin(&groupc->seq); in psi_account_irqtime()
1030 groupc->times[PSI_IRQ_FULL] += delta; in psi_account_irqtime()
1032 write_seqcount_end(&groupc->seq); in psi_account_irqtime()
1034 if (group->rtpoll_states & (1 << PSI_IRQ_FULL)) in psi_account_irqtime()
1036 } while ((group = group->parent)); in psi_account_irqtime()
1041 * psi_memstall_enter - mark the beginning of a memory stall section
1055 *flags = current->in_memstall; in psi_memstall_enter()
1065 current->in_memstall = 1; in psi_memstall_enter()
1073 * psi_memstall_leave - mark the end of a memory stall section
1095 current->in_memstall = 0; in psi_memstall_leave()
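A typical psi_memstall_enter()/psi_memstall_leave() call site (illustrative, not taken from this file) brackets the blocking operation with the pair; the saved flags make nesting safe:

    unsigned long pflags;

    psi_memstall_enter(&pflags);
    /* ... block on reclaim, compaction, thrashing page cache, etc. ... */
    psi_memstall_leave(&pflags);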
1108 cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL); in psi_cgroup_alloc()
1109 if (!cgroup->psi) in psi_cgroup_alloc()
1110 return -ENOMEM; in psi_cgroup_alloc()
1112 cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu); in psi_cgroup_alloc()
1113 if (!cgroup->psi->pcpu) { in psi_cgroup_alloc()
1114 kfree(cgroup->psi); in psi_cgroup_alloc()
1115 return -ENOMEM; in psi_cgroup_alloc()
1117 group_init(cgroup->psi); in psi_cgroup_alloc()
1118 cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup)); in psi_cgroup_alloc()
1127 cancel_delayed_work_sync(&cgroup->psi->avgs_work); in psi_cgroup_free()
1128 free_percpu(cgroup->psi->pcpu); in psi_cgroup_free()
1130 WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n"); in psi_cgroup_free()
1131 kfree(cgroup->psi); in psi_cgroup_free()
1135 * cgroup_move_task - move task to a different cgroup
1143 * changes to the task's scheduling state and - in case the task is
1144 * running - concurrent changes to its stall state.
1157 rcu_assign_pointer(task->cgroups, to); in cgroup_move_task()
1173 * p->on_rq = 0 in cgroup_move_task()
1179 * task->cgroups = to in cgroup_move_task()
1187 task_flags = task->psi_flags; in cgroup_move_task()
1193 rcu_assign_pointer(task->cgroups, to); in cgroup_move_task()
1206 * After psi_group->enabled is cleared, we don't actually in psi_cgroup_restart()
1212 * since cgroup pressure files are hidden and percpu psi_group_cpu in psi_cgroup_restart()
1213 * would see !psi_group->enabled and only do task accounting. in psi_cgroup_restart()
1215 * When cgroup PSI is re-enabled, this function uses psi_group_change() in psi_cgroup_restart()
1217 * and restarts groupc->state_start from now, using .clear = .set = 0 in psi_cgroup_restart()
1220 if (!group->enabled) in psi_cgroup_restart()
1241 return -EOPNOTSUPP; in psi_show()
1244 mutex_lock(&group->avgs_lock); in psi_show()
1247 if (now >= group->avg_next_update) in psi_show()
1248 group->avg_next_update = update_averages(group, now); in psi_show()
1249 mutex_unlock(&group->avgs_lock); in psi_show()
1255 for (full = 0; full < 2 - only_full; full++) { in psi_show()
1263 avg[w] = group->avg[res * 2 + full][w]; in psi_show()
1264 total = div_u64(group->total[PSI_AVGS][res * 2 + full], in psi_show()
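The lines psi_show() emits, one per state, look like this (values illustrative):

    some avg10=0.12 avg60=0.58 avg300=1.04 total=10326621
    full avg10=0.00 avg60=0.13 avg300=0.56 total=5933266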
1290 return ERR_PTR(-EOPNOTSUPP); in psi_trigger_create()
1293 * Checking the privilege here on file->f_cred implies that a privileged user in psi_trigger_create()
1296 privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE); in psi_trigger_create()
1303 return ERR_PTR(-EINVAL); in psi_trigger_create()
1306 if (res == PSI_IRQ && --state != PSI_IRQ_FULL) in psi_trigger_create()
1307 return ERR_PTR(-EINVAL); in psi_trigger_create()
1311 return ERR_PTR(-EINVAL); in psi_trigger_create()
1314 return ERR_PTR(-EINVAL); in psi_trigger_create()
1321 return ERR_PTR(-EINVAL); in psi_trigger_create()
1325 return ERR_PTR(-EINVAL); in psi_trigger_create()
1329 return ERR_PTR(-ENOMEM); in psi_trigger_create()
1331 t->group = group; in psi_trigger_create()
1332 t->state = state; in psi_trigger_create()
1333 t->threshold = threshold_us * NSEC_PER_USEC; in psi_trigger_create()
1334 t->win.size = window_us * NSEC_PER_USEC; in psi_trigger_create()
1335 window_reset(&t->win, sched_clock(), in psi_trigger_create()
1336 group->total[PSI_POLL][t->state], 0); in psi_trigger_create()
1338 t->event = 0; in psi_trigger_create()
1339 t->last_event_time = 0; in psi_trigger_create()
1340 t->of = of; in psi_trigger_create()
1342 init_waitqueue_head(&t->event_wait); in psi_trigger_create()
1343 t->pending_event = false; in psi_trigger_create()
1344 t->aggregator = privileged ? PSI_POLL : PSI_AVGS; in psi_trigger_create()
1347 mutex_lock(&group->rtpoll_trigger_lock); in psi_trigger_create()
1349 if (!rcu_access_pointer(group->rtpoll_task)) { in psi_trigger_create()
1355 mutex_unlock(&group->rtpoll_trigger_lock); in psi_trigger_create()
1358 atomic_set(&group->rtpoll_wakeup, 0); in psi_trigger_create()
1360 rcu_assign_pointer(group->rtpoll_task, task); in psi_trigger_create()
1363 list_add(&t->node, &group->rtpoll_triggers); in psi_trigger_create()
1364 group->rtpoll_min_period = min(group->rtpoll_min_period, in psi_trigger_create()
1365 div_u64(t->win.size, UPDATES_PER_WINDOW)); in psi_trigger_create()
1366 group->rtpoll_nr_triggers[t->state]++; in psi_trigger_create()
1367 group->rtpoll_states |= (1 << t->state); in psi_trigger_create()
1369 mutex_unlock(&group->rtpoll_trigger_lock); in psi_trigger_create()
1371 mutex_lock(&group->avgs_lock); in psi_trigger_create()
1373 list_add(&t->node, &group->avg_triggers); in psi_trigger_create()
1374 group->avg_nr_triggers[t->state]++; in psi_trigger_create()
1376 mutex_unlock(&group->avgs_lock); in psi_trigger_create()
1393 group = t->group; in psi_trigger_destroy()
1399 if (t->of) in psi_trigger_destroy()
1400 kernfs_notify(t->of->kn); in psi_trigger_destroy()
1402 wake_up_interruptible(&t->event_wait); in psi_trigger_destroy()
1404 if (t->aggregator == PSI_AVGS) { in psi_trigger_destroy()
1405 mutex_lock(&group->avgs_lock); in psi_trigger_destroy()
1406 if (!list_empty(&t->node)) { in psi_trigger_destroy()
1407 list_del(&t->node); in psi_trigger_destroy()
1408 group->avg_nr_triggers[t->state]--; in psi_trigger_destroy()
1410 mutex_unlock(&group->avgs_lock); in psi_trigger_destroy()
1412 mutex_lock(&group->rtpoll_trigger_lock); in psi_trigger_destroy()
1413 if (!list_empty(&t->node)) { in psi_trigger_destroy()
1417 list_del(&t->node); in psi_trigger_destroy()
1418 group->rtpoll_nr_triggers[t->state]--; in psi_trigger_destroy()
1419 if (!group->rtpoll_nr_triggers[t->state]) in psi_trigger_destroy()
1420 group->rtpoll_states &= ~(1 << t->state); in psi_trigger_destroy()
1422 * Reset min update period for the remaining triggers in psi_trigger_destroy()
1423 * iff the destroying trigger had the min window size. in psi_trigger_destroy()
1425 if (group->rtpoll_min_period == div_u64(t->win.size, UPDATES_PER_WINDOW)) { in psi_trigger_destroy()
1426 list_for_each_entry(tmp, &group->rtpoll_triggers, node) in psi_trigger_destroy()
1427 period = min(period, div_u64(tmp->win.size, in psi_trigger_destroy()
1429 group->rtpoll_min_period = period; in psi_trigger_destroy()
1432 if (group->rtpoll_states == 0) { in psi_trigger_destroy()
1433 group->rtpoll_until = 0; in psi_trigger_destroy()
1435 group->rtpoll_task, in psi_trigger_destroy()
1436 lockdep_is_held(&group->rtpoll_trigger_lock)); in psi_trigger_destroy()
1437 rcu_assign_pointer(group->rtpoll_task, NULL); in psi_trigger_destroy()
1438 del_timer(&group->rtpoll_timer); in psi_trigger_destroy()
1441 mutex_unlock(&group->rtpoll_trigger_lock); in psi_trigger_destroy()
1445 * Wait for psi_schedule_rtpoll_work RCU to complete its read-side in psi_trigger_destroy()
1458 * can no longer be found through group->rtpoll_task. in psi_trigger_destroy()
1461 atomic_set(&group->rtpoll_scheduled, 0); in psi_trigger_destroy()
1479 if (t->of) in psi_trigger_poll()
1480 kernfs_generic_poll(t->of, wait); in psi_trigger_poll()
1482 poll_wait(file, &t->event_wait, wait); in psi_trigger_poll()
1484 if (cmpxchg(&t->event, 1, 0) == 1) in psi_trigger_poll()
1530 return -EOPNOTSUPP; in psi_write()
1533 return -EINVAL; in psi_write()
1535 buf_size = min(nbytes, sizeof(buf)); in psi_write()
1537 return -EFAULT; in psi_write()
1539 buf[buf_size - 1] = '\0'; in psi_write()
1541 seq = file->private_data; in psi_write()
1543 /* Take seq->lock to protect seq->private from concurrent writes */ in psi_write()
1544 mutex_lock(&seq->lock); in psi_write()
1547 if (seq->private) { in psi_write()
1548 mutex_unlock(&seq->lock); in psi_write()
1549 return -EBUSY; in psi_write()
1554 mutex_unlock(&seq->lock); in psi_write()
1558 smp_store_release(&seq->private, new); in psi_write()
1559 mutex_unlock(&seq->lock); in psi_write()
1584 struct seq_file *seq = file->private_data; in psi_fop_poll()
1586 return psi_trigger_poll(&seq->private, file, wait); in psi_fop_poll()
1591 struct seq_file *seq = file->private_data; in psi_fop_release()
1593 psi_trigger_destroy(seq->private); in psi_fop_release()
1654 proc_mkdir("pressure", NULL); in psi_proc_init()
1655 proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops); in psi_proc_init()
1656 proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops); in psi_proc_init()
1657 proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops); in psi_proc_init()
1659 proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops); in psi_proc_init()
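From user space these files are read for the avg10/avg60/avg300 lines above and written to register triggers. A minimal monitor in the style of the example in Documentation/accounting/psi.rst, asking for an event when memory SOME stall exceeds 150 ms within any 1 s window:

    #include <errno.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char trig[] = "some 150000 1000000";   /* threshold us, window us */
        struct pollfd fds;
        int n;

        fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
        if (fds.fd < 0) {
            printf("/proc/pressure/memory open error: %s\n", strerror(errno));
            return 1;
        }
        fds.events = POLLPRI;

        /* Register the trigger by writing it to the pressure file */
        if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
            printf("/proc/pressure/memory write error: %s\n", strerror(errno));
            return 1;
        }

        while (1) {
            n = poll(&fds, 1, -1);
            if (n < 0) {
                printf("poll error: %s\n", strerror(errno));
                return 1;
            }
            if (fds.revents & POLLERR) {
                printf("got POLLERR, event source is gone\n");
                return 0;
            }
            if (fds.revents & POLLPRI)
                printf("event triggered!\n");
        }
        return 0;
    }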