#ifndef BLK_THROTTLE_H
#define BLK_THROTTLE_H

#include "blk-cgroup-rwstat.h"

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, a local or
 * child group which can queue many bios at once risks filling up the list
 * and starving the others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};
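
/*
 * Not part of this interface: a minimal sketch, simplified from what
 * blk-throttle.c does, of how a qnode is initialized and how a bio is
 * queued on it.  Linking an empty qnode onto the service_queue takes a
 * blkg reference, matching the refcounting rule described above.
 *
 *	INIT_LIST_HEAD(&qn->node);
 *	bio_list_init(&qn->bios);
 *	qn->tg = tg;
 *
 *	bio_list_add(&qn->bios, bio);
 *	if (list_empty(&qn->node)) {
 *		list_add_tail(&qn->node, &sq->queued[rw]);
 *		blkg_get(pd_to_blkg(&qn->tg->pd));
 *	}
 */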

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};
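
/*
 * Illustrative only: pending_tree is keyed by tg->disptime, so the
 * leftmost node of the cached rbtree is the next group due to
 * dispatch.  A sketch of looking it up, assuming the rb_root_cached
 * helpers from <linux/rbtree.h>:
 *
 *	struct rb_node *n = rb_first_cached(&sq->pending_tree);
 *
 *	if (n) {
 *		struct throtl_grp *tg = rb_entry(n, struct throtl_grp,
 *						 rb_node);
 *		// tg->disptime is when pending_timer should fire next
 *	}
 */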

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* queued[] became non-empty */
	THROTL_TG_CANCELING	= 1 << 2,	/* starts to cancel bios */
};
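
/*
 * These bits live in tg->flags (declared below).  A sketch of a test
 * helper, assuming the flags are serialized by the queue lock:
 *
 *	static inline bool tg_pending(struct throtl_grp *tg)
 *	{
 *		return tg->flags & THROTL_TG_PENDING;
 *	}
 */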

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];
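
	/*
	 * Illustrative only: when queueing a bio on this group's own
	 * service_queue, qnode_on_self[rw] is the natural choice; when
	 * propagating a bio up, qnode_on_parent[rw] is.  A hypothetical
	 * selection, with "parent" indicating the target queue:
	 *
	 *	qn = parent ? &tg->qnode_on_parent[rw]
	 *		    : &tg->qnode_on_self[rw];
	 */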

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;
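
	/*
	 * A sketch (not the exact blk-throttle.c arithmetic) of how
	 * disptime could be derived for a throttled bio: wait for both
	 * budgets, then re-key the group in the parent's pending_tree.
	 *
	 *	wait = max(bps_wait, iops_wait);
	 *	tg->disptime = jiffies + wait;
	 */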

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules_bps[2];
	bool has_rules_iops[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];
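
	/*
	 * Rough shape of the bps budget check (a simplified sketch, not
	 * the kernel's exact math): the allowance grows linearly over
	 * the slice, and a bio may dispatch once it fits.
	 *
	 *	elapsed = jiffies - tg->slice_start[rw];
	 *	allowed = div64_u64(tg->bps[rw] * elapsed, (u64)HZ);
	 *	if (tg->bytes_disp[rw] + bio_size <= allowed)
	 *		dispatch now, charging bytes_disp[rw];
	 *	else
	 *		wait until the budget catches up;
	 */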

	/*
	 * The following two fields are updated when a new configuration is
	 * submitted while some bios are still throttled; they record how many
	 * bytes/ios have already been waited for under the previous
	 * configuration and are used to calculate the wait time under the
	 * new configuration.
	 */
	long long carryover_bytes[2];
	int carryover_ios[2];
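
	/*
	 * Sketch of the carryover idea (assumed, simplified): on a config
	 * change mid-slice, fold the unused (or overdrawn) part of the old
	 * budget into carryover so bios that already waited are not
	 * charged twice under the new limits.
	 *
	 *	tg->carryover_bytes[rw] += allowed_old - tg->bytes_disp[rw];
	 */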

	unsigned long last_check_time;

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	struct blkg_rwstat stat_bytes;
	struct blkg_rwstat stat_ios;
};

extern struct blkcg_policy blkcg_policy_throtl;

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

/*
 * Internal throttling interface
 */
#ifndef CONFIG_BLK_DEV_THROTTLING
static inline void blk_throtl_exit(struct gendisk *disk) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
static inline void blk_throtl_cancel_bios(struct gendisk *disk) { }
#else /* CONFIG_BLK_DEV_THROTTLING */
void blk_throtl_exit(struct gendisk *disk);
bool __blk_throtl_bio(struct bio *bio);
void blk_throtl_cancel_bios(struct gendisk *disk);

static inline bool blk_throtl_activated(struct request_queue *q)
{
	return q->td != NULL;
}

static inline bool blk_should_throtl(struct bio *bio)
{
	struct throtl_grp *tg;
	int rw = bio_data_dir(bio);

	/*
	 * This is called under bio_queue_enter(), and it's synchronized with
	 * the activation of blk-throtl, which is protected by
	 * blk_mq_freeze_queue().
	 */
	if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
		return false;

	tg = blkg_to_tg(bio->bi_blkg);
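	/*
	 * On the legacy (v1) hierarchy, blk-throttle maintains its own
	 * rwstat counters; on the default hierarchy the generic cgroup
	 * I/O statistics cover this, so the accounting below is skipped.
	 */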
	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
		if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
			bio_set_flag(bio, BIO_CGROUP_ACCT);
			blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		}
		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
	}

	/* iops limit is always counted */
	if (tg->has_rules_iops[rw])
		return true;

	if (tg->has_rules_bps[rw] && !bio_flagged(bio, BIO_BPS_THROTTLED))
		return true;

	return false;
}

static inline bool blk_throtl_bio(struct bio *bio)
{
	if (!blk_should_throtl(bio))
		return false;

	return __blk_throtl_bio(bio);
}
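
/*
 * Hypothetical caller in the bio submission path (the real call site
 * lives elsewhere in the block layer): a true return means the bio was
 * queued for later dispatch, so submission must stop here.
 *
 *	if (blk_throtl_bio(bio))
 *		return;
 *	... otherwise continue issuing the bio ...
 */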
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif