/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
	WRITE_ONCE(list->next, list);
	WRITE_ONCE(list->prev, list);
}

/*
 * Return the ->next pointer of a list_head in an RCU-safe
 * way; we must not access it directly.
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))

/**
 * list_tail_rcu - returns the prev pointer of the head of the list
 * @head: the head of the list
 *
 * Note: This should only be used with the list header, and even then
 * only if list_del() and similar primitives are not also used on the
 * list header.
 */
#define list_tail_rcu(head)	(*((struct list_head __rcu **)(&(head)->prev)))

/*
 * Check during list traversal that we are within an RCU reader
 */

#define check_arg_count_one(dummy)

#ifdef CONFIG_PROVE_RCU_LIST
#define __list_check_rcu(dummy, cond, extra...)				\
	({								\
	check_arg_count_one(extra);					\
	RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(),		\
			 "RCU-list traversed in non-reader section!");	\
	})

#define __list_check_srcu(cond)					 \
	({								 \
	RCU_LOCKDEP_WARN(!(cond),					 \
		"RCU-list traversed without holding the required lock!");\
	})
#else
#define __list_check_rcu(dummy, cond, extra...)				\
	({ check_arg_count_one(extra); })

#define __list_check_srcu(cond) ({ })
#endif

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}
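
/*
 * Example (illustrative sketch, not part of this header): an updater
 * adding an entry under a spinlock while readers traverse concurrently
 * under RCU.  The struct, list, and lock names below are hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct list_head list;
 *	};
 *	static LIST_HEAD(foo_list);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	void foo_add(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_add_rcu(&p->list, &foo_list);
 *		spin_unlock(&foo_lock);
 *	}
 */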

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lock-free traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	entry->prev = LIST_POISON2;
}
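
/*
 * Example (illustrative sketch, not part of this header): deleting an
 * entry under the hypothetical foo_lock, then deferring the free with
 * kfree_rcu() until a grace period has elapsed.  This assumes struct
 * foo embeds a struct rcu_head named "rh".
 *
 *	struct foo {
 *		int data;
 *		struct list_head list;
 *		struct rcu_head rh;
 *	};
 *
 *	void foo_del(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&p->list);
 *		spin_unlock(&foo_lock);
 *		kfree_rcu(p, rh);	// freed after a grace period
 *	}
 */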

/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on the node returns true after this. It is
 * useful for RCU-based lock-free read traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		WRITE_ONCE(n->pprev, NULL);
	}
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically from
 * the perspective of concurrent readers.  It is the caller's responsibility
 * to synchronize with concurrent updaters, if any.
 *
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	old->prev = LIST_POISON2;
}

/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list:	the RCU-protected list to splice
 * @prev:	points to the last element of the existing list
 * @next:	points to the first element of the existing list
 * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list.  In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created.  But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
					  struct list_head *prev,
					  struct list_head *next,
					  void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	/*
	 * "first" and "last" now track the contents of the source list,
	 * so the source list head can be reinitialized.  RCU readers
	 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
	 * instead of INIT_LIST_HEAD().
	 */

	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list.  Any new readers will see
	 * an empty list.
	 */

	sync();
	ASSERT_EXCLUSIVE_ACCESS(*first);
	ASSERT_EXCLUSIVE_ACCESS(*last);

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers.  Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = next;
	rcu_assign_pointer(list_next_rcu(prev), first);
	first->prev = prev;
	next->prev = last;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 *                        designed for stacks.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the existing list to splice the first list into
 * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head, head->next, sync);
}
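
/*
 * Example (illustrative sketch, not part of this header): draining a
 * reader-visible list into a thread-private list for batch processing.
 * A mutex (not a spinlock) is used because synchronize_rcu() sleeps;
 * the list and mutex names are hypothetical.
 *
 *	static LIST_HEAD(pending);		// visible to RCU readers
 *	static DEFINE_MUTEX(pending_mutex);	// excludes other updaters
 *
 *	void drain_pending(void)
 *	{
 *		LIST_HEAD(batch);
 *
 *		mutex_lock(&pending_mutex);
 *		list_splice_init_rcu(&pending, &batch, synchronize_rcu);
 *		mutex_unlock(&pending_mutex);
 *		// "batch" is now private; "pending" reads as empty
 *	}
 */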

/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 *                             list, designed for queues.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the existing list to splice the first list into
 * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
					     struct list_head *head,
					     void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head->prev, head, sync);
}

/**
 * list_entry_rcu - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(READ_ONCE(ptr), type, member)

/*
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * They do not exist because they would lead to subtle race conditions:
 *
 * if (!list_empty_rcu(mylist)) {
 *	struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *	do_something(bar);
 * }
 *
 * The list might be non-empty when list_empty_rcu() checks it, but it
 * might have become empty by the time that list_first_entry_rcu() rereads
 * the ->next pointer, which would result in a SEGV.
 *
 * When not using RCU, it is OK for list_first_entry() to re-read that
 * pointer because both functions should be protected by some lock that
 * blocks writers.
 *
 * When using RCU, list_empty() uses READ_ONCE() to fetch the
 * RCU-protected ->next pointer and then compares it to the address of the
 * list head.  However, it neither dereferences this pointer nor provides
 * this pointer to its caller.  Thus, READ_ONCE() suffices (that is,
 * rcu_dereference() is not needed), which means that list_empty() can be
 * used anywhere you would want to use list_empty_rcu().  Just don't
 * expect anything useful to happen if you do a subsequent lockless
 * call to list_first_entry_rcu()!!!
 *
 * See list_first_or_null_rcu() for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr:        the list head to take the element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
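
/*
 * Example (illustrative sketch, not part of this header): the safe
 * replacement for the list_empty_rcu()/list_first_entry_rcu() race
 * described above, using the hypothetical foo_list.
 *
 *	struct foo *f;
 *
 *	rcu_read_lock();
 *	f = list_first_or_null_rcu(&foo_list, struct foo, list);
 *	if (f)
 *		do_something(f);
 *	rcu_read_unlock();
 */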

/**
 * list_next_or_null_rcu - get the next element from a list
 * @head:	the head for the list.
 * @ptr:        the list head to take the next element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
	struct list_head *__head = (head); \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__next != __head) ? list_entry_rcu(__next, type, \
						  member) : NULL; \
})

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 * @cond:	optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
		&pos->member != (head);					\
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
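
/*
 * Example (illustrative sketch, not part of this header): a reader
 * traversing the hypothetical foo_list under rcu_read_lock(), and an
 * updater doing the same traversal under foo_lock, passing a lockdep
 * expression as the optional cond argument.
 *
 *	struct foo *f;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(f, &foo_list, list)
 *		handle(f);
 *	rcu_read_unlock();
 *
 *	// Updater side, protected by foo_lock rather than RCU:
 *	spin_lock(&foo_lock);
 *	list_for_each_entry_rcu(f, &foo_list, list,
 *				lockdep_is_held(&foo_lock))
 *		handle(f);
 *	spin_unlock(&foo_lock);
 */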

/**
 * list_for_each_entry_srcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 * @cond:	lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 */
#define list_for_each_entry_srcu(pos, head, member, cond)		\
	for (__list_check_srcu(cond),					\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
		&pos->member != (head);					\
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
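
/*
 * Example (illustrative sketch, not part of this header): traversing
 * the hypothetical foo_list under SRCU rather than vanilla RCU.
 *
 *	DEFINE_STATIC_SRCU(foo_srcu);
 *
 *	struct foo *f;
 *	int idx;
 *
 *	idx = srcu_read_lock(&foo_srcu);
 *	list_for_each_entry_srcu(f, &foo_list, list,
 *				 srcu_read_lock_held(&foo_srcu))
 *		handle(f);
 *	srcu_read_unlock(&foo_srcu, idx);
 */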

/**
 * list_entry_lockless - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked.  Another example is when items are added to the list,
 * but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))READ_ONCE(ptr), type, member)

/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked.  Another example is when items are added to the list,
 * but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position which must have been in the list when the RCU read
 * lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_from_rcu() except
 * this starts after the given position and that one starts at the given
 * position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member)		\
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);	\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from_rcu - iterate over a list from current point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * Iterate over the tail of a list starting from a given position,
 * which must have been in the list when the RCU read lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_continue_rcu() except
 * this starts from the given position and that one starts from the position
 * after the given position.
 */
#define list_for_each_entry_from_rcu(pos, head, member)			\
	for (; &(pos)->member != (head);					\
		pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))
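
/*
 * Example (illustrative sketch, not part of this header): a two-stage
 * walk of the hypothetical foo_list that leaves the RCU read-side
 * critical section mid-list.  The hypothetical foo_get()/foo_put()
 * reference count keeps the cursor alive and on the list across the
 * gap; this assumes the first loop always finds a matching entry.
 *
 *	struct foo *f, *pinned;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(f, &foo_list, list)
 *		if (is_breakpoint(f))
 *			break;
 *	pinned = foo_get(f);	// assumes a match was found
 *	rcu_read_unlock();
 *
 *	do_sleepy_work();
 *
 *	rcu_read_lock();
 *	f = pinned;
 *	list_for_each_entry_from_rcu(f, &foo_list, list)
 *		handle(f);
 *	rcu_read_unlock();
 *	foo_put(pinned);
 */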

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lock-free traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	WRITE_ONCE(n->pprev, LIST_POISON2);
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically from
 * the perspective of concurrent readers.  It is the caller's responsibility
 * to synchronize with concurrent updaters, if any.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	WRITE_ONCE(new->pprev, old->pprev);
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		WRITE_ONCE(new->next->pprev, &new->next);
	WRITE_ONCE(old->pprev, LIST_POISON2);
}

/**
 * hlists_swap_heads_rcu - swap the lists the hlist heads point to
 * @left:  The hlist head on the left
 * @right: The hlist head on the right
 *
 * The lists start out as [@left  ][node1 ... ] and
 *                        [@right ][node2 ... ]
 * The lists end up as    [@left  ][node2 ... ]
 *                        [@right ][node1 ... ]
 */
static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
{
	struct hlist_node *node1 = left->first;
	struct hlist_node *node2 = right->first;

	rcu_assign_pointer(left->first, node2);
	rcu_assign_pointer(right->first, node1);
	WRITE_ONCE(node2->pprev, &left->first);
	WRITE_ONCE(node1->pprev, &right->first);
}

/*
 * return the first or the next element in an RCU protected hlist
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu - add an element to the beginning of a hash list
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	WRITE_ONCE(n->pprev, &h->first);
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		WRITE_ONCE(first->pprev, &n->next);
}
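
/*
 * Example (illustrative sketch, not part of this header): inserting an
 * element into one bucket of a hypothetical 64-bucket hash table under
 * its lock.
 *
 *	struct bar {
 *		int key;
 *		struct hlist_node node;
 *	};
 *	static struct hlist_head bar_hash[64];
 *	static DEFINE_SPINLOCK(bar_lock);
 *
 *	void bar_insert(struct bar *b)
 *	{
 *		spin_lock(&bar_lock);
 *		hlist_add_head_rcu(&b->node, &bar_hash[b->key & 63]);
 *		spin_unlock(&bar_lock);
 *	}
 */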

/**
 * hlist_add_tail_rcu - add an element to the end of a hash list
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
				      struct hlist_head *h)
{
	struct hlist_node *i, *last = NULL;

	/* Note: write side code, so rcu accessors are not needed. */
	for (i = h->first; i; i = i->next)
		last = i;

	if (last) {
		n->next = last->next;
		WRITE_ONCE(n->pprev, &last->next);
		rcu_assign_pointer(hlist_next_rcu(last), n);
	} else {
		hlist_add_head_rcu(n, h);
	}
}

/**
 * hlist_add_before_rcu - add an element before an existing hash-list element
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	WRITE_ONCE(n->pprev, next->pprev);
	n->next = next;
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	WRITE_ONCE(next->pprev, &n->next);
}

/**
 * hlist_add_behind_rcu - add an element after an existing hash-list element
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	n->next = prev->next;
	WRITE_ONCE(n->pprev, &prev->next);
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		WRITE_ONCE(n->next->pprev, &n->next);
}

#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 * @cond:	optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
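
/*
 * Example (illustrative sketch, not part of this header): an RCU
 * reader probing the hypothetical bar_hash table above.  The result
 * is consumed inside the read-side critical section, since the entry
 * may be freed once rcu_read_unlock() is reached.
 *
 *	bool bar_present(int key)
 *	{
 *		struct bar *b;
 *		bool ret = false;
 *
 *		rcu_read_lock();
 *		hlist_for_each_entry_rcu(b, &bar_hash[key & 63], node)
 *			if (b->key == key) {
 *				ret = true;
 *				break;
 *			}
 *		rcu_read_unlock();
 *		return ret;
 *	}
 */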

/**
 * hlist_for_each_entry_srcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 * @cond:	lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 */
#define hlist_for_each_entry_srcu(pos, head, member, cond)		\
	for (__list_check_srcu(cond),					\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member)			\
	for (; pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

#endif	/* __KERNEL__ */
#endif