/* include/linux/percpu-refcount.h: lines matching "ref" */

/*
 * percpu_ref_kill() puts the ref back in single atomic_t mode, collecting
 * the per-cpu refs and issuing the appropriate barriers, and then marks the
 * ref as shutting down so that percpu_ref_put() will check for the ref
 * hitting 0.  After it returns, it's safe to drop the initial ref.
 *
 * Once there can be no new users of the object (e.g. after RCU-protected
 * lookups have been cut off), it's then safe to drop the initial ref with
 * percpu_ref_put().
 */
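
/*
 * A minimal sketch of that shutdown sequence (not part of this header);
 * struct my_obj, my_obj_release() and my_obj_destroy() are hypothetical
 * names used only for illustration.
 */
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref ref;
};

static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	percpu_ref_exit(&obj->ref);	/* free the percpu counters */
	kfree(obj);
}

static void my_obj_destroy(struct my_obj *obj)
{
	percpu_ref_kill(&obj->ref);	/* switch to atomic mode, mark dying */
	percpu_ref_put(&obj->ref);	/* now safe to drop the initial ref */
}
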
/* @flags for percpu_ref_init(): */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
	 * operation with percpu_ref_switch_to_percpu(); if initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
	 * percpu_ref_reinit() before used.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,
};
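
/*
 * Sketch of how the init flags are typically used (hypothetical my_obj and
 * my_obj_release() from the sketch above).  Passing PERCPU_REF_INIT_DEAD
 * instead would start the ref dead at 0, requiring percpu_ref_reinit()
 * before use.
 */
static int my_obj_setup(struct my_obj *obj)
{
	int ret;

	/* Start at ref == 1 in atomic mode; stays atomic until switched. */
	ret = percpu_ref_init(&obj->ref, my_obj_release,
			      PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
	if (ret)
		return ret;

	percpu_ref_switch_to_percpu(&obj->ref);	/* now in percpu mode */
	return 0;
}
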
struct percpu_ref_data {
	/* ... */
	struct percpu_ref	*ref;
};

struct percpu_ref {
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
	unsigned long		percpu_count_ptr;
	struct percpu_ref_data	*data;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release,
				 unsigned int flags, gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
bool percpu_ref_is_zero(struct percpu_ref *ref);
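
/*
 * Sketch of a drain pattern built from these entry points, loosely modelled
 * on how the block layer freezes its usage counter; the completion and the
 * helper names are hypothetical.
 */
#include <linux/completion.h>

static DECLARE_COMPLETION(my_obj_kill_done);

static void my_obj_confirm_kill(struct percpu_ref *ref)
{
	/* Past this point percpu_ref_tryget_live() can no longer succeed. */
	complete(&my_obj_kill_done);
}

static void my_obj_drain(struct my_obj *obj)
{
	percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
	wait_for_completion(&my_obj_kill_done);
	percpu_ref_put(&obj->ref);	/* drop the initial ref */
}
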
/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
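
/*
 * Because there are no implied RCU grace periods between kill and release,
 * a release callback for an object that RCU readers may still dereference
 * should defer the actual free; a sketch, assuming a hypothetical my_obj
 * that embeds a struct rcu_head named rcu:
 */
static void my_obj_release_rcu(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	percpu_ref_exit(&obj->ref);
	kfree_rcu(obj, rcu);	/* free only after a grace period */
}
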
/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The function
 * doesn't return the pointer and let the caller test it for NULL because
 * doing so forces the compiler to generate two conditional branches, as it
 * can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
				   unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for the atomic flag
	 * and then used as a pointer, so it must be fetched exactly once,
	 * hence the READ_ONCE().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/* ... */
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->data->count);

	rcu_read_unlock();
}
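
/*
 * Sketch: percpu_ref_get_many() pairs naturally with a batch of operations
 * that each drop one reference on completion (names hypothetical):
 */
static void my_obj_queue_batch(struct my_obj *obj, unsigned int nr_items)
{
	/* One ref per queued item, taken in a single operation. */
	percpu_ref_get_many(&obj->ref, nr_items);

	/*
	 * ... queue nr_items work items, each calling
	 * percpu_ref_put(&obj->ref) when it finishes ...
	 */
}
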
/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget_many - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 * @nr: number of references to get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
					  unsigned long nr)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_add(*percpu_count, nr);
		ret = true;
	} else {
		ret = atomic_long_add_unless(&ref->data->count, nr, 0);
	}

	rcu_read_unlock();

	return ret;
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	return percpu_ref_tryget_many(ref, 1);
}
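
/*
 * Sketch: unlike percpu_ref_get(), tryget may be used when the count could
 * already have reached zero, as long as the caller otherwise guarantees the
 * memory is still valid (e.g. under RCU); my_obj_tryget() is hypothetical.
 */
static struct my_obj *my_obj_tryget(struct my_obj *obj)
{
	if (!percpu_ref_tryget(&obj->ref))
		return NULL;	/* already released, or release in progress */
	return obj;
}
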
/**
 * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
 * caller is responsible for taking RCU.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(__ref_is_percpu(ref, &percpu_count))) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->data->count);
	}
	return ret;
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such guarantee, percpu_ref_kill_and_confirm()
 * should be used: after the confirm_kill callback is invoked, no new
 * reference will be given out by percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	bool ret;

	rcu_read_lock();
	ret = percpu_ref_tryget_live_rcu(ref);
	rcu_read_unlock();
	return ret;
}
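
/*
 * Sketch of the RCU lookup pattern percpu_ref_tryget_live() serves: find
 * the object under rcu_read_lock() and return it only if it isn't dying
 * (the xarray table and my_obj_lookup() are hypothetical).
 */
#include <linux/xarray.h>

static struct my_obj *my_obj_lookup(struct xarray *table, unsigned long id)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = xa_load(table, id);
	if (obj && !percpu_ref_tryget_live(&obj->ref))
		obj = NULL;	/* found, but already killed */
	rcu_read_unlock();

	return obj;
}
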
/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
		ref->data->release(ref);

	rcu_read_unlock();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}
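
/*
 * Sketch: get/put bracketing an asynchronous operation; the final put is
 * what ends up invoking the release callback (names hypothetical).
 */
static void my_obj_io_start(struct my_obj *obj)
{
	percpu_ref_get(&obj->ref);
	/* ... submit work that eventually calls my_obj_io_done() ... */
}

static void my_obj_io_done(struct my_obj *obj)
{
	percpu_ref_put(&obj->ref);	/* may call the release callback */
}
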
/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
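
/*
 * Sketch: percpu_ref_is_dying() as an opportunistic early-out.  Since the
 * caller must synchronize against state changes itself, the authoritative
 * check is still the tryget_live() that follows (names hypothetical).
 */
static int my_obj_submit(struct my_obj *obj)
{
	if (percpu_ref_is_dying(&obj->ref))
		return -ENODEV;		/* shutting down, bail early */

	if (!percpu_ref_tryget_live(&obj->ref))
		return -ENODEV;		/* lost the race with kill */

	/* ... do the work ... */

	percpu_ref_put(&obj->ref);
	return 0;
}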