/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#include <linux/sched.h>

/*
 * Common vtime APIs
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* Account elapsed kernel/idle cputime to @tsk; implemented per accounting flavor. */
extern void vtime_account_kernel(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/* Generic (context-tracking based) vtime hooks; no-ops when GEN is off. */
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* Native (arch-driven) IRQ-time vtime hooks; no-ops when NATIVE is off. */
extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
extern void vtime_account_softirq(struct task_struct *tsk);
extern void vtime_account_hardirq(struct task_struct *tsk);
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
static inline void vtime_account_softirq(struct task_struct *tsk) { }
static inline void vtime_account_hardirq(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * vtime_accounting_enabled_this_cpu() definitions/declarations
 */
45 #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
46
vtime_accounting_enabled_this_cpu(void)47 static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
48 extern void vtime_task_switch(struct task_struct *prev);
49
vtime_account_guest_enter(void)50 static __always_inline void vtime_account_guest_enter(void)
51 {
52 vtime_account_kernel(current);
53 current->flags |= PF_VCPU;
54 }
55
vtime_account_guest_exit(void)56 static __always_inline void vtime_account_guest_exit(void)
57 {
58 vtime_account_kernel(current);
59 current->flags &= ~PF_VCPU;
60 }
61
62 #elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
63
64 /*
65 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
66 * in that case and compute the tickless cputime.
67 * For now vtime state is tied to context tracking. We might want to decouple
68 * those later if necessary.
69 */
vtime_accounting_enabled(void)70 static inline bool vtime_accounting_enabled(void)
71 {
72 return context_tracking_enabled();
73 }
74
vtime_accounting_enabled_cpu(int cpu)75 static inline bool vtime_accounting_enabled_cpu(int cpu)
76 {
77 return context_tracking_enabled_cpu(cpu);
78 }
79
vtime_accounting_enabled_this_cpu(void)80 static inline bool vtime_accounting_enabled_this_cpu(void)
81 {
82 return context_tracking_enabled_this_cpu();
83 }
84
85 extern void vtime_task_switch_generic(struct task_struct *prev);
86
vtime_task_switch(struct task_struct * prev)87 static inline void vtime_task_switch(struct task_struct *prev)
88 {
89 if (vtime_accounting_enabled_this_cpu())
90 vtime_task_switch_generic(prev);
91 }
92
vtime_account_guest_enter(void)93 static __always_inline void vtime_account_guest_enter(void)
94 {
95 if (vtime_accounting_enabled_this_cpu())
96 vtime_guest_enter(current);
97 else
98 current->flags |= PF_VCPU;
99 }
100
vtime_account_guest_exit(void)101 static __always_inline void vtime_account_guest_exit(void)
102 {
103 if (vtime_accounting_enabled_this_cpu())
104 vtime_guest_exit(current);
105 else
106 current->flags &= ~PF_VCPU;
107 }
108
109 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
110
vtime_accounting_enabled_this_cpu(void)111 static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
vtime_task_switch(struct task_struct * prev)112 static inline void vtime_task_switch(struct task_struct *prev) { }
113
vtime_account_guest_enter(void)114 static __always_inline void vtime_account_guest_enter(void)
115 {
116 current->flags |= PF_VCPU;
117 }
118
vtime_account_guest_exit(void)119 static __always_inline void vtime_account_guest_exit(void)
120 {
121 current->flags &= ~PF_VCPU;
122 }
123
124 #endif
125
126
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/* Tick-independent IRQ-time accounting hook; no-op when not configured. */
extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
#else
static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

account_softirq_enter(struct task_struct * tsk)133 static inline void account_softirq_enter(struct task_struct *tsk)
134 {
135 vtime_account_irq(tsk, SOFTIRQ_OFFSET);
136 irqtime_account_irq(tsk, SOFTIRQ_OFFSET);
137 }
138
/*
 * Notify both vtime and irqtime accounting that @tsk is leaving softirq
 * context. The 0 offset tells irqtime accounting we are back to task
 * context (paired with the SOFTIRQ_OFFSET passed on entry).
 */
static inline void account_softirq_exit(struct task_struct *tsk)
{
	vtime_account_softirq(tsk);
	irqtime_account_irq(tsk, 0);
}

account_hardirq_enter(struct task_struct * tsk)145 static inline void account_hardirq_enter(struct task_struct *tsk)
146 {
147 vtime_account_irq(tsk, HARDIRQ_OFFSET);
148 irqtime_account_irq(tsk, HARDIRQ_OFFSET);
149 }
150
/*
 * Notify both vtime and irqtime accounting that @tsk is leaving hardirq
 * context. The 0 offset mirrors account_softirq_exit().
 */
static inline void account_hardirq_exit(struct task_struct *tsk)
{
	vtime_account_hardirq(tsk);
	irqtime_account_irq(tsk, 0);
}

#endif /* _LINUX_KERNEL_VTIME_H */