1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_ALLOC_FOREGROUND_H
3 #define _BCACHEFS_ALLOC_FOREGROUND_H
4 
5 #include "bcachefs.h"
6 #include "alloc_types.h"
7 #include "extents.h"
8 #include "sb-members.h"
9 
10 #include <linux/hash.h>
11 
struct bkey;
struct bch_dev;
struct bch_fs;
/*
 * Was "struct bch_devs_List": C struct tags are case-sensitive, so the
 * capitalized tag declared a type that never matched the
 * "struct bch_devs_list" actually used in the prototypes below.
 */
struct bch_devs_list;
16 
17 extern const char * const bch2_watermarks[];
18 
19 void bch2_reset_alloc_cursors(struct bch_fs *);
20 
21 struct dev_alloc_list {
22 	unsigned	nr;
23 	u8		devs[BCH_SB_MEMBERS_MAX];
24 };
25 
26 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *,
27 					  struct dev_stripe_state *,
28 					  struct bch_devs_mask *);
29 void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
30 
31 long bch2_bucket_alloc_new_fs(struct bch_dev *);
32 
ob_dev(struct bch_fs * c,struct open_bucket * ob)33 static inline struct bch_dev *ob_dev(struct bch_fs *c, struct open_bucket *ob)
34 {
35 	return bch2_dev_have_ref(c, ob->dev);
36 }
37 
38 struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
39 				      enum bch_watermark, enum bch_data_type,
40 				      struct closure *);
41 
ob_push(struct bch_fs * c,struct open_buckets * obs,struct open_bucket * ob)42 static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
43 			   struct open_bucket *ob)
44 {
45 	BUG_ON(obs->nr >= ARRAY_SIZE(obs->v));
46 
47 	obs->v[obs->nr++] = ob - c->open_buckets;
48 }
49 
/*
 * Iterate over the open_buckets referenced by @_obs: @_i is the position in
 * the vector, @_ob the corresponding struct open_bucket in @_c's table.
 *
 * Hygiene fix: @_i is now parenthesized in the v[] subscript, matching its
 * other expansions, so a non-trivial expression argument can't misparse.
 */
#define open_bucket_for_each(_c, _obs, _ob, _i)				\
	for ((_i) = 0;							\
	     (_i) < (_obs)->nr &&					\
	     ((_ob) = (_c)->open_buckets + (_obs)->v[(_i)], true);	\
	     (_i)++)
55 
ec_open_bucket(struct bch_fs * c,struct open_buckets * obs)56 static inline struct open_bucket *ec_open_bucket(struct bch_fs *c,
57 						 struct open_buckets *obs)
58 {
59 	struct open_bucket *ob;
60 	unsigned i;
61 
62 	open_bucket_for_each(c, obs, ob, i)
63 		if (ob->ec)
64 			return ob;
65 
66 	return NULL;
67 }
68 
69 void bch2_open_bucket_write_error(struct bch_fs *,
70 			struct open_buckets *, unsigned);
71 
72 void __bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);
73 
bch2_open_bucket_put(struct bch_fs * c,struct open_bucket * ob)74 static inline void bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
75 {
76 	if (atomic_dec_and_test(&ob->pin))
77 		__bch2_open_bucket_put(c, ob);
78 }
79 
bch2_open_buckets_put(struct bch_fs * c,struct open_buckets * ptrs)80 static inline void bch2_open_buckets_put(struct bch_fs *c,
81 					 struct open_buckets *ptrs)
82 {
83 	struct open_bucket *ob;
84 	unsigned i;
85 
86 	open_bucket_for_each(c, ptrs, ob, i)
87 		bch2_open_bucket_put(c, ob);
88 	ptrs->nr = 0;
89 }
90 
bch2_alloc_sectors_done_inlined(struct bch_fs * c,struct write_point * wp)91 static inline void bch2_alloc_sectors_done_inlined(struct bch_fs *c, struct write_point *wp)
92 {
93 	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
94 	struct open_bucket *ob;
95 	unsigned i;
96 
97 	open_bucket_for_each(c, &wp->ptrs, ob, i)
98 		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
99 	wp->ptrs = keep;
100 
101 	mutex_unlock(&wp->lock);
102 
103 	bch2_open_buckets_put(c, &ptrs);
104 }
105 
bch2_open_bucket_get(struct bch_fs * c,struct write_point * wp,struct open_buckets * ptrs)106 static inline void bch2_open_bucket_get(struct bch_fs *c,
107 					struct write_point *wp,
108 					struct open_buckets *ptrs)
109 {
110 	struct open_bucket *ob;
111 	unsigned i;
112 
113 	open_bucket_for_each(c, &wp->ptrs, ob, i) {
114 		ob->data_type = wp->data_type;
115 		atomic_inc(&ob->pin);
116 		ob_push(c, ptrs, ob);
117 	}
118 }
119 
open_bucket_hashslot(struct bch_fs * c,unsigned dev,u64 bucket)120 static inline open_bucket_idx_t *open_bucket_hashslot(struct bch_fs *c,
121 						  unsigned dev, u64 bucket)
122 {
123 	return c->open_buckets_hash +
124 		(jhash_3words(dev, bucket, bucket >> 32, 0) &
125 		 (OPEN_BUCKETS_COUNT - 1));
126 }
127 
bch2_bucket_is_open(struct bch_fs * c,unsigned dev,u64 bucket)128 static inline bool bch2_bucket_is_open(struct bch_fs *c, unsigned dev, u64 bucket)
129 {
130 	open_bucket_idx_t slot = *open_bucket_hashslot(c, dev, bucket);
131 
132 	while (slot) {
133 		struct open_bucket *ob = &c->open_buckets[slot];
134 
135 		if (ob->dev == dev && ob->bucket == bucket)
136 			return true;
137 
138 		slot = ob->hash;
139 	}
140 
141 	return false;
142 }
143 
bch2_bucket_is_open_safe(struct bch_fs * c,unsigned dev,u64 bucket)144 static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 bucket)
145 {
146 	bool ret;
147 
148 	if (bch2_bucket_is_open(c, dev, bucket))
149 		return true;
150 
151 	spin_lock(&c->freelist_lock);
152 	ret = bch2_bucket_is_open(c, dev, bucket);
153 	spin_unlock(&c->freelist_lock);
154 
155 	return ret;
156 }
157 
158 enum bch_write_flags;
159 int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
160 		      struct dev_stripe_state *, struct bch_devs_mask *,
161 		      unsigned, unsigned *, bool *, enum bch_write_flags,
162 		      enum bch_data_type, enum bch_watermark,
163 		      struct closure *);
164 
165 int bch2_alloc_sectors_start_trans(struct btree_trans *,
166 				   unsigned, unsigned,
167 				   struct write_point_specifier,
168 				   struct bch_devs_list *,
169 				   unsigned, unsigned,
170 				   enum bch_watermark,
171 				   enum bch_write_flags,
172 				   struct closure *,
173 				   struct write_point **);
174 
175 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *, struct open_bucket *);
176 
177 /*
178  * Append pointers to the space we just allocated to @k, and mark @sectors space
179  * as allocated out of @ob
180  */
181 static inline void
bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs * c,struct write_point * wp,struct bkey_i * k,unsigned sectors,bool cached)182 bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
183 				       struct bkey_i *k, unsigned sectors,
184 				       bool cached)
185 {
186 	struct open_bucket *ob;
187 	unsigned i;
188 
189 	BUG_ON(sectors > wp->sectors_free);
190 	wp->sectors_free	-= sectors;
191 	wp->sectors_allocated	+= sectors;
192 
193 	open_bucket_for_each(c, &wp->ptrs, ob, i) {
194 		struct bch_dev *ca = ob_dev(c, ob);
195 		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
196 
197 		ptr.cached = cached ||
198 			(!ca->mi.durability &&
199 			 wp->data_type == BCH_DATA_user);
200 
201 		bch2_bkey_append_ptr(k, ptr);
202 
203 		BUG_ON(sectors > ob->sectors_free);
204 		ob->sectors_free -= sectors;
205 	}
206 }
207 
208 void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
209 				    struct bkey_i *, unsigned, bool);
210 void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);
211 
212 void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *, bool);
213 
writepoint_hashed(unsigned long v)214 static inline struct write_point_specifier writepoint_hashed(unsigned long v)
215 {
216 	return (struct write_point_specifier) { .v = v | 1 };
217 }
218 
writepoint_ptr(struct write_point * wp)219 static inline struct write_point_specifier writepoint_ptr(struct write_point *wp)
220 {
221 	return (struct write_point_specifier) { .v = (unsigned long) wp };
222 }
223 
224 void bch2_fs_allocator_foreground_init(struct bch_fs *);
225 
226 void bch2_open_bucket_to_text(struct printbuf *, struct bch_fs *, struct open_bucket *);
227 void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *, struct bch_dev *);
228 void bch2_open_buckets_partial_to_text(struct printbuf *, struct bch_fs *);
229 
230 void bch2_write_points_to_text(struct printbuf *, struct bch_fs *);
231 
232 void bch2_fs_alloc_debug_to_text(struct printbuf *, struct bch_fs *);
233 void bch2_dev_alloc_debug_to_text(struct printbuf *, struct bch_dev *);
234 
235 void __bch2_wait_on_allocator(struct bch_fs *, struct closure *);
bch2_wait_on_allocator(struct bch_fs * c,struct closure * cl)236 static inline void bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
237 {
238 	if (cl->closure_get_happened)
239 		__bch2_wait_on_allocator(c, cl);
240 }
241 
242 #endif /* _BCACHEFS_ALLOC_FOREGROUND_H */
243