/*
 *  include/linux/ktime.h
 *
 *  ktime_t - nanosecond-resolution time format.
 *
 *   Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 *  data type definitions, declarations, prototypes and macros.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *
 *	Roman Zippel provided the ideas and primary code snippets of
 *	the ktime_t union and further simplifications of the original
 *	code.
 *
 *  For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H

#include <asm/bug.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/types.h>

/**
 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
 * @secs:	seconds to set
 * @nsecs:	nanoseconds to set
 *
 * Return: The ktime_t representation of the value.
 */
static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
	if (unlikely(secs >= KTIME_SEC_MAX))
		return KTIME_MAX;

	return secs * NSEC_PER_SEC + (s64)nsecs;
}
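
/*
 * Example (illustrative only): building a 1.5 second interval. The
 * NSEC_PER_* constants come in via <linux/time.h>:
 *
 *	ktime_t interval = ktime_set(1, 500 * NSEC_PER_MSEC);
 */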

/* Subtract two ktime_t variables. res = lhs - rhs: */
#define ktime_sub(lhs, rhs)	((lhs) - (rhs))

/* Add two ktime_t variables. res = lhs + rhs: */
#define ktime_add(lhs, rhs)	((lhs) + (rhs))

/*
 * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
 * this means that you must check the result for overflow yourself.
 */
#define ktime_add_unsafe(lhs, rhs)	((u64) (lhs) + (rhs))
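
/*
 * Sketch of a caller-side overflow check, loosely modelled on what
 * ktime_add_safe() (declared further down) does: a signed result that
 * went negative or ended up below either operand indicates wraparound.
 *
 *	ktime_t res = ktime_add_unsafe(lhs, rhs);
 *
 *	if (res < 0 || res < lhs || res < rhs)
 *		res = KTIME_MAX;
 */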

/*
 * Add a ktime_t variable and a scalar nanosecond value.
 * res = kt + nsval:
 */
#define ktime_add_ns(kt, nsval)		((kt) + (nsval))

/*
 * Subtract a scalar nanosecond value from a ktime_t variable.
 * res = kt - nsval:
 */
#define ktime_sub_ns(kt, nsval)		((kt) - (nsval))

/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
	return ktime_set(ts.tv_sec, ts.tv_nsec);
}
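
/*
 * Example (illustrative, hypothetical values): round-tripping a
 * timespec64 through ktime_t.
 *
 *	struct timespec64 ts = { .tv_sec = 2, .tv_nsec = 250000 };
 *	ktime_t kt = timespec64_to_ktime(ts);
 */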

/* Map the ktime_t to timespec64 conversion to the ns_to_timespec64() function */
#define ktime_to_timespec64(kt)		ns_to_timespec64((kt))

/* Convert ktime_t to nanoseconds */
static inline s64 ktime_to_ns(const ktime_t kt)
{
	return kt;
}

/**
 * ktime_compare - Compares two ktime_t variables for less, greater or equal
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: ...
 *   cmp1  < cmp2: return <0
 *   cmp1 == cmp2: return 0
 *   cmp1  > cmp2: return >0
 */
static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
	if (cmp1 < cmp2)
		return -1;
	if (cmp1 > cmp2)
		return 1;
	return 0;
}

/**
 * ktime_after - Compare if a ktime_t value is bigger than another one.
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: true if cmp1 happened after cmp2.
 */
static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) > 0;
}

/**
 * ktime_before - Compare if a ktime_t value is smaller than another one.
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: true if cmp1 happened before cmp2.
 */
static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) < 0;
}
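
/*
 * Typical deadline pattern (sketch; assumes ktime_get() from
 * <linux/timekeeping.h>, included at the end of this header, and a
 * hypothetical @deadline set up by the caller):
 *
 *	if (ktime_after(ktime_get(), deadline))
 *		return -ETIMEDOUT;
 */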

#if BITS_PER_LONG < 64
extern s64 __ktime_divns(const ktime_t kt, s64 div);
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * Negative divisors could cause an infinite loop,
	 * so bug out here.
	 */
	BUG_ON(div < 0);
	if (__builtin_constant_p(div) && !(div >> 32)) {
		s64 ns = kt;
		u64 tmp = ns < 0 ? -ns : ns;

		do_div(tmp, div);
		return ns < 0 ? -tmp : tmp;
	} else {
		return __ktime_divns(kt, div);
	}
}
#else /* BITS_PER_LONG < 64 */
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * The 32-bit implementation cannot handle negative divisors,
	 * so catch them on 64-bit as well.
	 */
	WARN_ON(div < 0);
	return kt / div;
}
#endif
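
/*
 * Example (illustrative only): ktime_divns() is the building block for
 * the unit conversions below, e.g. whole seconds from a ktime_t:
 *
 *	s64 secs = ktime_divns(kt, NSEC_PER_SEC);
 */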

static inline s64 ktime_to_us(const ktime_t kt)
{
	return ktime_divns(kt, NSEC_PER_USEC);
}

static inline s64 ktime_to_ms(const ktime_t kt)
{
	return ktime_divns(kt, NSEC_PER_MSEC);
}

static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_us(ktime_sub(later, earlier));
}

static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_ms(ktime_sub(later, earlier));
}
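
/*
 * Sketch of measuring elapsed time (assumes ktime_get() from
 * <linux/timekeeping.h>, pulled in at the end of this header):
 *
 *	ktime_t start = ktime_get();
 *	do_something();		// hypothetical workload
 *	pr_debug("took %lld us\n", ktime_us_delta(ktime_get(), start));
 */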

static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
	return ktime_add_ns(kt, usec * NSEC_PER_USEC);
}

static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
{
	return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
}

static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
	return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}

static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
{
	return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
}
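
/*
 * Example (illustrative only): pushing a deadline 100ms into the
 * future (again assuming ktime_get() from <linux/timekeeping.h>):
 *
 *	ktime_t deadline = ktime_add_ms(ktime_get(), 100);
 */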

extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);

/**
 * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
 *			    format only if the variable contains data
 * @kt:		the ktime_t variable to convert
 * @ts:		the timespec64 variable to store the result in
 *
 * Return: %true if there was a successful conversion, %false if kt was 0.
 */
static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
						       struct timespec64 *ts)
{
	if (kt) {
		*ts = ktime_to_timespec64(kt);
		return true;
	} else {
		return false;
	}
}
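
/*
 * Example (sketch, hypothetical @kt): only convert when the value was
 * actually set, e.g. when 0 means "no timeout configured":
 *
 *	struct timespec64 ts;
 *
 *	if (!ktime_to_timespec64_cond(kt, &ts))
 *		return -EINVAL;
 */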

#include <vdso/ktime.h>

static inline ktime_t ns_to_ktime(u64 ns)
{
	return ns;
}

static inline ktime_t ms_to_ktime(u64 ms)
{
	return ms * NSEC_PER_MSEC;
}
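
/*
 * Example (illustrative only): turning a millisecond value, say a
 * hypothetical @poll_ms module parameter, into a ktime_t timeout:
 *
 *	ktime_t timeout = ms_to_ktime(poll_ms);
 */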

# include <linux/timekeeping.h>

#endif