/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/wait.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @word: word holding free bits
	 */
	unsigned long word;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;

	/**
	 * @swap_lock: serializes simultaneous updates of ->word and ->cleared
	 */
	raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/*
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/*
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow()
	 */
	unsigned int min_shallow_depth;

	/**
	 * @completion_cnt: Number of cleared bits passed to the
	 * wakeup function.
	 */
	atomic_t completion_cnt;

	/**
	 * @wakeup_cnt: Number of thread wake ups issued.
	 */
	atomic_t wakeup_cnt;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 * @alloc_hint: If true, apply percpu hint for where to start searching for
 *              a free bit.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin, bool alloc_hint);
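
/*
 * Example (an illustrative sketch, not from the kernel tree; the depth of
 * 128 and the GFP_KERNEL/NUMA_NO_NODE arguments are assumptions): a minimal
 * init/free lifecycle, letting the shift default with -1 and enabling the
 * percpu allocation hint.
 *
 *	struct sbitmap sb;
 *	int ret;
 *
 *	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *				false, true);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_free(&sb);
 */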

/* sbitmap internal helper */
static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
{
	if (index == sb->map_nr - 1)
		return sb->depth - (index << sb->shift);
	return 1U << sb->shift;
}
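
/*
 * For illustration (values assumed): with depth = 100 and shift = 6 (64 bits
 * per word), map_nr is 2; __map_depth(sb, 0) returns 64, and for the partial
 * last word __map_depth(sb, 1) returns 100 - (1 << 6) = 36.
 */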

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kvfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);
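
/*
 * Example (a sketch; use_tag() is a hypothetical consumer and -EBUSY one
 * plausible way to report exhaustion): allocate a bit, use it as a tag,
 * then release it with sbitmap_put() defined below.
 *
 *	int nr = sbitmap_get(&sb);
 *
 *	if (nr < 0)
 *		return -EBUSY;
 *	use_tag(nr);
 *	sbitmap_put(&sb, nr);
 */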

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
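
/*
 * Example (illustrative only): a low-priority caller limited to half of
 * each word, while high-priority callers keep using sbitmap_get() and can
 * therefore always reach the other half.
 *
 *	int nr = sbitmap_get_shallow(&sb, 1UL << (sb.shift - 1));
 */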

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   __map_depth(sb, index) - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}
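
/*
 * Example (a sketch; count_fn and the counter are assumptions): count the
 * set bits through the callback interface. Returning true continues the
 * iteration.
 *
 *	static bool count_fn(struct sbitmap *sb, unsigned int bitnr,
 *			     void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(&sb, count_fn, &count);
 */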

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special, since it doesn't actually clear the bit, rather it
 * sets the corresponding bit in the ->cleared mask instead. Paired with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}
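
/*
 * Example (illustrative): freeing bit @nr without dirtying the hot ->word
 * cacheline; the bit is folded back into ->word later by the internal
 * deferred-clear handling once a word looks exhausted.
 *
 *	sbitmap_deferred_clear_bit(&sb, nr);
 */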

/*
 * Pairs with sbitmap_get(); this variant both defers clearing the bit and
 * updates the allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*raw_cpu_ptr(sb->alloc_hint) = bitnr;
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_calculate_shift(unsigned int depth)
{
	int	shift = ilog2(BITS_PER_LONG);

	/*
	 * If the bitmap is small, shrink the number of bits per word so
	 * we spread over a few cachelines, at least. If less than 4
	 * bits, just forget about it, it's not going to work optimally
	 * anyway.
	 */
	if (depth >= 4) {
		while ((4U << shift) > depth)
			shift--;
	}

	return shift;
}
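
/*
 * Worked example (assuming BITS_PER_LONG == 64): for depth = 16, shift
 * starts at ilog2(64) = 6 and is decremented while (4 << shift) > 16,
 * ending at shift = 2, i.e. 4 bits per word spread across 4 words.
 */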

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_weight() - Return the number of set and not cleared bits in a
 * &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: How many bits are set and not cleared.
 */
unsigned int sbitmap_weight(const struct sbitmap *sb);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);
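
/*
 * Example (a sketch; the depth of 256 and the allocation arguments are
 * assumptions): the queue variant follows the same lifecycle as a bare
 * &struct sbitmap.
 *
 *	struct sbitmap_queue sbq;
 *	int ret;
 *
 *	ret = sbitmap_queue_init_node(&sbq, 256, -1, false, GFP_KERNEL,
 *				      NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_queue_free(&sbq);
 */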

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 *
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
 * @sbq: Bitmap queue to recalculate wake batch.
 * @users: Number of shares.
 *
 * Like sbitmap_queue_update_wake_batch(), this will calculate wake batch
 * by depth. This interface is for HCTX shared tags or queue shared tags.
 */
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int users);

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
 * @sbq: Bitmap queue to allocate from.
 * @nr_tags: number of tags requested
 * @offset: offset to add to returned bits
 *
 * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
 * a bit in the mask returned, and the caller must add @offset to the value to
 * get the absolute tag value.
 */
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset);
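
/*
 * Example (a sketch; the tags[] array and nr counter are assumptions):
 * decode the returned mask into absolute tag values.
 *
 *	unsigned int offset;
 *	unsigned long mask = __sbitmap_queue_get_batch(&sbq, 4, &offset);
 *	unsigned int bit;
 *
 *	for_each_set_bit(bit, &mask, BITS_PER_LONG)
 *		tags[nr++] = offset + bit;
 */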

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}
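
/*
 * Example (illustrative; -EBUSY is one plausible way to report exhaustion):
 * record the CPU at allocation time and hand the same CPU back to
 * sbitmap_queue_clear() so the per-cpu hint stays warm.
 *
 *	unsigned int cpu;
 *	int nr = sbitmap_queue_get(&sbq, &cpu);
 *
 *	if (nr < 0)
 *		return -EBUSY;
 *	...
 *	sbitmap_queue_clear(&sbq, nr, cpu);
 */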

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);
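
/*
 * Example (a sketch; the limit of 8 is an assumption): declare the smallest
 * shallow depth once after init, then allocate with it.
 *
 *	sbitmap_queue_min_shallow_depth(&sbq, 8);
 *	...
 *	nr = sbitmap_queue_get_shallow(&sbq, 8);
 */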

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

/**
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @offset: offset for each tag in array
 * @tags: array of tags
 * @nr_tags: number of tags in array
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 * @nr: Number of bits cleared.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)							\
	struct sbq_wait name = {						\
		.sbq = NULL,							\
		.wait = {							\
			.private	= current,				\
			.func		= autoremove_wake_function,		\
			.entry		= LIST_HEAD_INIT((name).wait.entry),	\
		}								\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
				struct sbq_wait_state *ws,
				struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
				struct sbq_wait *sbq_wait);
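
/*
 * Example (a hedged sketch of the intended pairing; the TASK_UNINTERRUPTIBLE
 * state, io_schedule() and the per-user wait_index counter are assumptions
 * modeled on typical callers): sleep until a bit can be allocated.
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws;
 *	int nr;
 *
 *	ws = sbq_wait_ptr(&sbq, &wait_index);
 *	for (;;) {
 *		sbitmap_prepare_to_wait(&sbq, ws, &wait,
 *					TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(&sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	}
 *	sbitmap_finish_wait(&sbq, ws, &wait);
 */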

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue()
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */