1  /* SPDX-License-Identifier: GPL-2.0
2   *
3   * IO cost model based controller.
4   *
5   * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6   * Copyright (C) 2019 Andy Newell <newella@fb.com>
7   * Copyright (C) 2019 Facebook
8   *
9   * One challenge of controlling IO resources is the lack of trivially
10   * observable cost metric.  This is distinguished from CPU and memory where
11   * wallclock time and the number of bytes can serve as accurate enough
12   * approximations.
13   *
14   * Bandwidth and iops are the most commonly used metrics for IO devices but
15   * depending on the type and specifics of the device, different IO patterns
16   * easily lead to multiple orders of magnitude variations rendering them
17   * useless for the purpose of IO capacity distribution.  While on-device
18   * time, with a lot of crutches, could serve as a useful approximation for
19   * non-queued rotational devices, this is no longer viable with modern
20   * devices, even the rotational ones.
21   *
22   * While there is no cost metric we can trivially observe, it isn't a
23   * complete mystery.  For example, on a rotational device, seek cost
24   * dominates while a contiguous transfer contributes a smaller amount
25   * proportional to the size.  If we can characterize at least the relative
26   * costs of these different types of IOs, it should be possible to
27   * implement a reasonable work-conserving proportional IO resource
28   * distribution.
29   *
30   * 1. IO Cost Model
31   *
32   * IO cost model estimates the cost of an IO given its basic parameters and
33   * history (e.g. the end sector of the last IO).  The cost is measured in
34   * device time.  If a given IO is estimated to cost 10ms, the device should
35   * be able to process ~100 of those IOs in a second.
36   *
37   * Currently, there's only one builtin cost model - linear.  Each IO is
38   * classified as sequential or random and given a base cost accordingly.
39   * On top of that, a size cost proportional to the length of the IO is
40   * added.  While simple, this model captures the operational
41   * characteristics of a wide variety of devices well enough.  Default
42   * parameters for several different classes of devices are provided and the
43   * parameters can be configured from userspace via
44   * /sys/fs/cgroup/io.cost.model.
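 *
 * As an illustrative sketch (see Documentation/admin-guide/cgroup-v2.rst
 * for the authoritative interface description), overriding the model for
 * device 8:16 with the SSD_DFL coefficients defined later in this file
 * could look like:
 *
 *   echo "8:16 model=linear rbps=488636629 rseqiops=8932 rrandiops=8518 \
 *         wbps=427891549 wseqiops=28755 wrandiops=21940" > io.cost.model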
45   *
46   * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47   * device-specific coefficients.
48   *
49   * 2. Control Strategy
50   *
51   * The device virtual time (vtime) is used as the primary control metric.
52   * The control strategy is composed of the following three parts.
53   *
54   * 2-1. Vtime Distribution
55   *
56   * When a cgroup becomes active in terms of IOs, its hierarchical share is
57   * calculated.  Please consider the following hierarchy where the numbers
58   * inside parentheses denote the configured weights.
59   *
60   *           root
61   *         /       \
62   *      A (w:100)  B (w:300)
63   *      /       \
64   *  A0 (w:100)  A1 (w:100)
65   *
66   * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67   * of equal weight, each gets 50% share.  If then B starts issuing IOs, B
68   * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69   * 12.5% each.  The distribution mechanism only cares about these flattened
70   * shares.  They're called hweights (hierarchical weights) and always add
71   * up to 1 (WEIGHT_ONE).
72   *
73   * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74   * For example, with a 12.5% hweight, A0's vtime runs 8 times slower (100/12.5)
75   * against the device vtime - an IO which takes 10ms on the underlying
76   * device is considered to take 80ms on A0.
77   *
78   * This constitutes the basis of IO capacity distribution.  Each cgroup's
79   * vtime is running at a rate determined by its hweight.  A cgroup tracks
80   * the vtime consumed by past IOs and can issue a new IO if doing so
81   * wouldn't outrun the current device vtime.  Otherwise, the IO is
82   * suspended until the vtime has progressed enough to cover it.
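 *
 * As a simplified sketch (not the exact code, which also accounts for
 * debt and margins), the issue-time check amounts to:
 *
 *	if (iocg_vtime + cost <= device_vnow)
 *		issue now and advance iocg_vtime by cost;
 *	else
 *		wait until device_vnow catches up;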
83   *
84   * 2-2. Vrate Adjustment
85   *
86   * It's unrealistic to expect the cost model to be perfect.  There are too
87   * many devices and even on the same device the overall performance
88   * fluctuates depending on numerous factors such as IO mixture and device
89   * internal garbage collection.  The controller needs to adapt dynamically.
90   *
91   * This is achieved by adjusting the overall IO rate according to how busy
92   * the device is.  If the device becomes overloaded, we're sending down too
93   * many IOs and should generally slow down.  If there are waiting issuers
94   * but the device isn't saturated, we're issuing too few and should
95   * generally speed up.
96   *
97   * To slow down, we lower the vrate - the rate at which the device vtime
98   * passes compared to the wall clock.  For example, if the vtime is running
99   * at the vrate of 75%, all cgroups added up would only be able to issue
100   * 750ms worth of IOs per second, and vice-versa for speeding up.
101   *
102   * Device busyness is determined using two criteria - rq wait and
103   * completion latencies.
104   *
105   * When a device gets saturated, the on-device and then the request queues
106   * fill up and a bio which is ready to be issued has to wait for a request
107   * to become available.  When this delay becomes noticeable, it's a clear
108   * indication that the device is saturated and we lower the vrate.  This
109   * saturation signal is fairly conservative as it only triggers when both
110   * hardware and software queues are filled up, and is used as the default
111   * busy signal.
112   *
113   * As devices can have deep queues and be unfair in how the queued commands
114   * are executed, solely depending on rq wait may not result in satisfactory
115   * control quality.  For a better control quality, completion latency QoS
116   * parameters can be configured so that the device is considered saturated
117   * if N'th percentile completion latency rises above the set point.
118   *
119   * The completion latency requirements are a function of both the
120   * underlying device characteristics and the desired IO latency quality of
121   * service.  There is an inherent trade-off - the tighter the latency QoS,
122   * the higher the bandwidth loss.  Latency QoS is disabled by default
123   * and can be set through /sys/fs/cgroup/io.cost.qos.
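 *
 * As an illustrative sketch (again, see cgroup-v2.rst for the
 * authoritative interface description), enabling the controller on
 * device 8:16 with a 95th percentile read latency target of 10ms could
 * look like:
 *
 *   echo "8:16 enable=1 ctrl=user rpct=95.00 rlat=10000 wpct=95.00 \
 *         wlat=20000 min=50.00 max=150.00" > io.cost.qos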
124   *
125   * 2-3. Work Conservation
126   *
127   * Imagine two cgroups A and B with equal weights.  A is issuing a small IO
128   * periodically while B is sending out enough parallel IOs to saturate the
129   * device on its own.  Let's say A's usage amounts to 100ms worth of IO
130   * cost per second, i.e., 10% of the device capacity.  The naive
131   * distribution of half and half would lead to 60% utilization of the
132   * device, a significant reduction in the total amount of work done
133   * compared to free-for-all competition.  This is too high a cost to pay
134   * for IO control.
135   *
136   * To conserve the total amount of work done, we keep track of how much
137   * each active cgroup is actually using and yield part of its weight if
138   * there are other cgroups which can make use of it.  In the above case,
139   * A's weight will be lowered so that it hovers above the actual usage and
140   * B would be able to use the rest.
141   *
142   * As we don't want to penalize a cgroup for donating its weight, the
143   * surplus weight adjustment factors in a margin and has an immediate
144   * snapback mechanism in case the cgroup needs more IO vtime for itself.
145   *
146   * Note that adjusting down surplus weights has the same effects as
147   * accelerating vtime for other cgroups and work conservation can also be
148   * implemented by adjusting vrate dynamically.  However, working out who
149   * can donate how much and who should take back how much requires hweight
150   * propagations anyway, which makes it easier to implement and understand
151   * as a separate mechanism.
152   *
153   * 3. Monitoring
154   *
155   * Instead of debugfs or other clumsy monitoring mechanisms, this
156   * controller uses a drgn based monitoring script -
157   * tools/cgroup/iocost_monitor.py.  For details on drgn, please see
158   * https://github.com/osandov/drgn.  The output looks like the following.
159   *
160   *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161   *                 active      weight      hweight% inflt% dbt  delay usages%
162   *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
163   *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
164   *
165   * - per	: Timer period
166   * - cur_per	: Internal wall and device vtime clock
167   * - vrate	: Device virtual time rate against wall clock
168   * - weight	: Surplus-adjusted and configured weights
169   * - hweight	: Surplus-adjusted and configured hierarchical weights
170   * - inflt	: The percentage of in-flight IO cost at the end of last period
171   * - del_ms	: Deferred issuer delay induction level and duration
172   * - usages	: Usage history
173   */
174  
175  #include <linux/kernel.h>
176  #include <linux/module.h>
177  #include <linux/timer.h>
178  #include <linux/time64.h>
179  #include <linux/parser.h>
180  #include <linux/sched/signal.h>
181  #include <asm/local.h>
182  #include <asm/local64.h>
183  #include "blk-rq-qos.h"
184  #include "blk-stat.h"
185  #include "blk-wbt.h"
186  #include "blk-cgroup.h"
187  
188  #ifdef CONFIG_TRACEPOINTS
189  
190  /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191  #define TRACE_IOCG_PATH_LEN 1024
192  static DEFINE_SPINLOCK(trace_iocg_path_lock);
193  static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194  
195  #define TRACE_IOCG_PATH(type, iocg, ...)					\
196  	do {									\
197  		unsigned long flags;						\
198  		if (trace_iocost_##type##_enabled()) {				\
199  			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
200  			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
201  				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
202  			trace_iocost_##type(iocg, trace_iocg_path,		\
203  					      ##__VA_ARGS__);			\
204  			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
205  		}								\
206  	} while (0)
207  
208  #else	/* CONFIG_TRACEPOINTS */
209  #define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
210  #endif	/* CONFIG_TRACEPOINTS */
211  
212  enum {
213  	MILLION			= 1000000,
214  
215  	/* timer period is calculated from latency requirements, bound it */
216  	MIN_PERIOD		= USEC_PER_MSEC,
217  	MAX_PERIOD		= USEC_PER_SEC,
218  
219  	/*
220  	 * iocg->vtime is targeted at 50% behind the device vtime, which
221  	 * serves as its IO credit buffer.  Surplus weight adjustment is
222  	 * immediately canceled if the vtime margin runs below 10%.
223  	 */
224  	MARGIN_MIN_PCT		= 10,
225  	MARGIN_LOW_PCT		= 20,
226  	MARGIN_TARGET_PCT	= 50,
227  
228  	INUSE_ADJ_STEP_PCT	= 25,
229  
230  	/* Have some play in timer operations */
231  	TIMER_SLACK_PCT		= 1,
232  
233  	/* 1/64k is granular enough and can easily be handled w/ u32 */
234  	WEIGHT_ONE		= 1 << 16,
235  };
236  
237  enum {
238  	/*
239  	 * As vtime is used to calculate the cost of each IO, it needs to
240  	 * be fairly high precision.  For example, it should be able to
241  	 * represent the cost of a single page worth of discard with
242  	 * sufficient accuracy.  At the same time, it should be able to
243  	 * represent reasonably long enough durations to be useful and
244  	 * convenient during operation.
245  	 *
246  	 * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
247  	 * granularity and days of wrap-around time even at extreme vrates.
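	 *
	 * Concretely, 2^37 is ~1.37e11, so VTIME_PER_USEC is ~137438 and
	 * VTIME_PER_NSEC is ~137 - one nanosecond of device time maps to
	 * roughly 137 vtime units.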
248  	 */
249  	VTIME_PER_SEC_SHIFT	= 37,
250  	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
251  	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,
252  	VTIME_PER_NSEC		= VTIME_PER_SEC / NSEC_PER_SEC,
253  
254  	/* bound vrate adjustments within two orders of magnitude */
255  	VRATE_MIN_PPM		= 10000,	/* 1% */
256  	VRATE_MAX_PPM		= 100000000,	/* 10000% */
257  
258  	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
259  	VRATE_CLAMP_ADJ_PCT	= 4,
260  
261  	/* switch iff the conditions are met for longer than this */
262  	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,
263  };
264  
265  enum {
266  	/* if IOs end up waiting for requests, issue less */
267  	RQ_WAIT_BUSY_PCT	= 5,
268  
269  	/* unbusy hysteresis */
270  	UNBUSY_THR_PCT		= 75,
271  
272  	/*
273  	 * The effect of delay is indirect and non-linear and a huge amount of
274  	 * future debt can accumulate abruptly while unthrottled. Linearly scale
275  	 * up delay as debt is going up and then let it decay exponentially.
276  	 * This gives us quick ramp ups while delay is accumulating and long
277  	 * tails which can help reduce the frequency of debt explosions on
278  	 * unthrottle. The parameters are experimentally determined.
279  	 *
280  	 * The delay mechanism provides adequate protection and behavior in many
281  	 * cases. However, this is far from ideal and falls short on both
282  	 * fronts. The debtors are often throttled too harshly costing a
283  	 * significant level of fairness and possibly total work while the
284  	 * protection against their impacts on the system can be choppy and
285  	 * unreliable.
286  	 *
287  	 * The shortcoming primarily stems from the fact that, unlike for page
288  	 * cache, the kernel doesn't have a well-defined back-pressure propagation
289  	 * mechanism and policies for anonymous memory. Fully addressing this
290  	 * issue will likely require substantial improvements in the area.
291  	 */
292  	MIN_DELAY_THR_PCT	= 500,
293  	MAX_DELAY_THR_PCT	= 25000,
294  	MIN_DELAY		= 250,
295  	MAX_DELAY		= 250 * USEC_PER_MSEC,
296  
297  	/* halve debts if avg usage over 100ms is under 50% */
298  	DFGV_USAGE_PCT		= 50,
299  	DFGV_PERIOD		= 100 * USEC_PER_MSEC,
300  
301  	/* don't let cmds which take a very long time pin lagging for too long */
302  	MAX_LAGGING_PERIODS	= 10,
303  
304  	/*
305  	 * Count IO size in 4k pages.  The 12-bit shift helps keep the
306  	 * size-proportional components of the cost calculation within a
307  	 * similar number of digits as the per-IO cost components.
308  	 */
309  	IOC_PAGE_SHIFT		= 12,
310  	IOC_PAGE_SIZE		= 1 << IOC_PAGE_SHIFT,
311  	IOC_SECT_TO_PAGE_SHIFT	= IOC_PAGE_SHIFT - SECTOR_SHIFT,
312  
313  	/* if IOs are further apart than 16M, consider randio for the linear model */
314  	LCOEF_RANDIO_PAGES	= 4096,
315  };
316  
317  enum ioc_running {
318  	IOC_IDLE,
319  	IOC_RUNNING,
320  	IOC_STOP,
321  };
322  
323  /* io.cost.qos controls including per-dev enable of the whole controller */
324  enum {
325  	QOS_ENABLE,
326  	QOS_CTRL,
327  	NR_QOS_CTRL_PARAMS,
328  };
329  
330  /* io.cost.qos params */
331  enum {
332  	QOS_RPPM,
333  	QOS_RLAT,
334  	QOS_WPPM,
335  	QOS_WLAT,
336  	QOS_MIN,
337  	QOS_MAX,
338  	NR_QOS_PARAMS,
339  };
340  
341  /* io.cost.model controls */
342  enum {
343  	COST_CTRL,
344  	COST_MODEL,
345  	NR_COST_CTRL_PARAMS,
346  };
347  
348  /* builtin linear cost model coefficients */
349  enum {
350  	I_LCOEF_RBPS,
351  	I_LCOEF_RSEQIOPS,
352  	I_LCOEF_RRANDIOPS,
353  	I_LCOEF_WBPS,
354  	I_LCOEF_WSEQIOPS,
355  	I_LCOEF_WRANDIOPS,
356  	NR_I_LCOEFS,
357  };
358  
359  enum {
360  	LCOEF_RPAGE,
361  	LCOEF_RSEQIO,
362  	LCOEF_RRANDIO,
363  	LCOEF_WPAGE,
364  	LCOEF_WSEQIO,
365  	LCOEF_WRANDIO,
366  	NR_LCOEFS,
367  };
368  
369  enum {
370  	AUTOP_INVALID,
371  	AUTOP_HDD,
372  	AUTOP_SSD_QD1,
373  	AUTOP_SSD_DFL,
374  	AUTOP_SSD_FAST,
375  };
376  
377  struct ioc_params {
378  	u32				qos[NR_QOS_PARAMS];
379  	u64				i_lcoefs[NR_I_LCOEFS];
380  	u64				lcoefs[NR_LCOEFS];
381  	u32				too_fast_vrate_pct;
382  	u32				too_slow_vrate_pct;
383  };
384  
385  struct ioc_margins {
386  	s64				min;
387  	s64				low;
388  	s64				target;
389  };
390  
391  struct ioc_missed {
392  	local_t				nr_met;
393  	local_t				nr_missed;
394  	u32				last_met;
395  	u32				last_missed;
396  };
397  
398  struct ioc_pcpu_stat {
399  	struct ioc_missed		missed[2];
400  
401  	local64_t			rq_wait_ns;
402  	u64				last_rq_wait_ns;
403  };
404  
405  /* per device */
406  struct ioc {
407  	struct rq_qos			rqos;
408  
409  	bool				enabled;
410  
411  	struct ioc_params		params;
412  	struct ioc_margins		margins;
413  	u32				period_us;
414  	u32				timer_slack_ns;
415  	u64				vrate_min;
416  	u64				vrate_max;
417  
418  	spinlock_t			lock;
419  	struct timer_list		timer;
420  	struct list_head		active_iocgs;	/* active cgroups */
421  	struct ioc_pcpu_stat __percpu	*pcpu_stat;
422  
423  	enum ioc_running		running;
424  	atomic64_t			vtime_rate;
425  	u64				vtime_base_rate;
426  	s64				vtime_err;
427  
428  	seqcount_spinlock_t		period_seqcount;
429  	u64				period_at;	/* wallclock starttime */
430  	u64				period_at_vtime; /* vtime starttime */
431  
432  	atomic64_t			cur_period;	/* inc'd each period */
433  	int				busy_level;	/* saturation history */
434  
435  	bool				weights_updated;
436  	atomic_t			hweight_gen;	/* for lazy hweights */
437  
438  	/* debt forgivness */
439  	u64				dfgv_period_at;
440  	u64				dfgv_period_rem;
441  	u64				dfgv_usage_us_sum;
442  
443  	u64				autop_too_fast_at;
444  	u64				autop_too_slow_at;
445  	int				autop_idx;
446  	bool				user_qos_params:1;
447  	bool				user_cost_model:1;
448  };
449  
450  struct iocg_pcpu_stat {
451  	local64_t			abs_vusage;
452  };
453  
454  struct iocg_stat {
455  	u64				usage_us;
456  	u64				wait_us;
457  	u64				indebt_us;
458  	u64				indelay_us;
459  };
460  
461  /* per device-cgroup pair */
462  struct ioc_gq {
463  	struct blkg_policy_data		pd;
464  	struct ioc			*ioc;
465  
466  	/*
467  	 * An iocg can get its weight from two sources - an explicit
468  	 * per-device-cgroup configuration or the default weight of the
469  	 * cgroup.  `cfg_weight` is the explicit per-device-cgroup
470  	 * configuration.  `weight` is the effective weight considering both
471  	 * sources.
472  	 *
473  	 * When an idle cgroup becomes active its `active` goes from 0 to
474  	 * `weight`.  `inuse` is the surplus adjusted active weight.
475  	 * `active` and `inuse` are used to calculate `hweight_active` and
476  	 * `hweight_inuse`.
477  	 *
478  	 * `last_inuse` remembers `inuse` while an iocg is idle to persist
479  	 * surplus adjustments.
480  	 *
481  	 * `inuse` may be adjusted dynamically during the period.  `saved_*` are used
482  	 * to determine and track adjustments.
483  	 */
484  	u32				cfg_weight;
485  	u32				weight;
486  	u32				active;
487  	u32				inuse;
488  
489  	u32				last_inuse;
490  	s64				saved_margin;
491  
492  	sector_t			cursor;		/* to detect randio */
493  
494  	/*
495  	 * `vtime` is this iocg's vtime cursor which progresses as IOs are
496  	 * issued.  If lagging behind device vtime, the delta represents
497  	 * the currently available IO budget.  If running ahead, the
498  	 * overage.
499  	 *
500  	 * `vtime_done` is the same but progressed on completion rather
501  	 * than issue.  The delta behind `vtime` represents the cost of
502  	 * currently in-flight IOs.
503  	 */
504  	atomic64_t			vtime;
505  	atomic64_t			done_vtime;
506  	u64				abs_vdebt;
507  
508  	/* current delay in effect and when it started */
509  	u64				delay;
510  	u64				delay_at;
511  
512  	/*
513  	 * The period this iocg was last active in.  Used for deactivation
514  	 * and invalidating `vtime`.
515  	 */
516  	atomic64_t			active_period;
517  	struct list_head		active_list;
518  
519  	/* see __propagate_weights() and current_hweight() for details */
520  	u64				child_active_sum;
521  	u64				child_inuse_sum;
522  	u64				child_adjusted_sum;
523  	int				hweight_gen;
524  	u32				hweight_active;
525  	u32				hweight_inuse;
526  	u32				hweight_donating;
527  	u32				hweight_after_donation;
528  
529  	struct list_head		walk_list;
530  	struct list_head		surplus_list;
531  
532  	struct wait_queue_head		waitq;
533  	struct hrtimer			waitq_timer;
534  
535  	/* timestamp at the latest activation */
536  	u64				activated_at;
537  
538  	/* statistics */
539  	struct iocg_pcpu_stat __percpu	*pcpu_stat;
540  	struct iocg_stat		stat;
541  	struct iocg_stat		last_stat;
542  	u64				last_stat_abs_vusage;
543  	u64				usage_delta_us;
544  	u64				wait_since;
545  	u64				indebt_since;
546  	u64				indelay_since;
547  
548  	/* this iocg's depth in the hierarchy and ancestors including self */
549  	int				level;
550  	struct ioc_gq			*ancestors[];
551  };
552  
553  /* per cgroup */
554  struct ioc_cgrp {
555  	struct blkcg_policy_data	cpd;
556  	unsigned int			dfl_weight;
557  };
558  
559  struct ioc_now {
560  	u64				now_ns;
561  	u64				now;
562  	u64				vnow;
563  };
564  
565  struct iocg_wait {
566  	struct wait_queue_entry		wait;
567  	struct bio			*bio;
568  	u64				abs_cost;
569  	bool				committed;
570  };
571  
572  struct iocg_wake_ctx {
573  	struct ioc_gq			*iocg;
574  	u32				hw_inuse;
575  	s64				vbudget;
576  };
577  
578  static const struct ioc_params autop[] = {
579  	[AUTOP_HDD] = {
580  		.qos				= {
581  			[QOS_RLAT]		=        250000, /* 250ms */
582  			[QOS_WLAT]		=        250000,
583  			[QOS_MIN]		= VRATE_MIN_PPM,
584  			[QOS_MAX]		= VRATE_MAX_PPM,
585  		},
586  		.i_lcoefs			= {
587  			[I_LCOEF_RBPS]		=     174019176,
588  			[I_LCOEF_RSEQIOPS]	=         41708,
589  			[I_LCOEF_RRANDIOPS]	=           370,
590  			[I_LCOEF_WBPS]		=     178075866,
591  			[I_LCOEF_WSEQIOPS]	=         42705,
592  			[I_LCOEF_WRANDIOPS]	=           378,
593  		},
594  	},
595  	[AUTOP_SSD_QD1] = {
596  		.qos				= {
597  			[QOS_RLAT]		=         25000, /* 25ms */
598  			[QOS_WLAT]		=         25000,
599  			[QOS_MIN]		= VRATE_MIN_PPM,
600  			[QOS_MAX]		= VRATE_MAX_PPM,
601  		},
602  		.i_lcoefs			= {
603  			[I_LCOEF_RBPS]		=     245855193,
604  			[I_LCOEF_RSEQIOPS]	=         61575,
605  			[I_LCOEF_RRANDIOPS]	=          6946,
606  			[I_LCOEF_WBPS]		=     141365009,
607  			[I_LCOEF_WSEQIOPS]	=         33716,
608  			[I_LCOEF_WRANDIOPS]	=         26796,
609  		},
610  	},
611  	[AUTOP_SSD_DFL] = {
612  		.qos				= {
613  			[QOS_RLAT]		=         25000, /* 25ms */
614  			[QOS_WLAT]		=         25000,
615  			[QOS_MIN]		= VRATE_MIN_PPM,
616  			[QOS_MAX]		= VRATE_MAX_PPM,
617  		},
618  		.i_lcoefs			= {
619  			[I_LCOEF_RBPS]		=     488636629,
620  			[I_LCOEF_RSEQIOPS]	=          8932,
621  			[I_LCOEF_RRANDIOPS]	=          8518,
622  			[I_LCOEF_WBPS]		=     427891549,
623  			[I_LCOEF_WSEQIOPS]	=         28755,
624  			[I_LCOEF_WRANDIOPS]	=         21940,
625  		},
626  		.too_fast_vrate_pct		=           500,
627  	},
628  	[AUTOP_SSD_FAST] = {
629  		.qos				= {
630  			[QOS_RLAT]		=          5000, /* 5ms */
631  			[QOS_WLAT]		=          5000,
632  			[QOS_MIN]		= VRATE_MIN_PPM,
633  			[QOS_MAX]		= VRATE_MAX_PPM,
634  		},
635  		.i_lcoefs			= {
636  			[I_LCOEF_RBPS]		=    3102524156LLU,
637  			[I_LCOEF_RSEQIOPS]	=        724816,
638  			[I_LCOEF_RRANDIOPS]	=        778122,
639  			[I_LCOEF_WBPS]		=    1742780862LLU,
640  			[I_LCOEF_WSEQIOPS]	=        425702,
641  			[I_LCOEF_WRANDIOPS]	=	 443193,
642  		},
643  		.too_slow_vrate_pct		=            10,
644  	},
645  };
646  
647  /*
648   * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
649   * vtime credit shortage and down on device saturation.
650   */
651  static const u32 vrate_adj_pct[] =
652  	{ 0, 0, 0, 0,
653  	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
654  	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
655  	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
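
/*
 * For example, reading the table above: at busy_level +20 the entry is 2,
 * so ioc_adjust_base_vrate() scales the base vrate by 98% that period; at
 * busy_level -20 it scales it by 102%.
 */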
656  
657  static struct blkcg_policy blkcg_policy_iocost;
658  
659  /* accessors and helpers */
660  static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
661  {
662  	return container_of(rqos, struct ioc, rqos);
663  }
664  
665  static struct ioc *q_to_ioc(struct request_queue *q)
666  {
667  	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
668  }
669  
670  static const char __maybe_unused *ioc_name(struct ioc *ioc)
671  {
672  	struct gendisk *disk = ioc->rqos.disk;
673  
674  	if (!disk)
675  		return "<unknown>";
676  	return disk->disk_name;
677  }
678  
679  static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
680  {
681  	return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
682  }
683  
684  static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
685  {
686  	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
687  }
688  
689  static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
690  {
691  	return pd_to_blkg(&iocg->pd);
692  }
693  
694  static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
695  {
696  	return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
697  			    struct ioc_cgrp, cpd);
698  }
699  
700  /*
701   * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
702   * weight, the more expensive each IO.  Must round up.
703   */
704  static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
705  {
706  	return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
707  }
708  
709  /*
710   * The inverse of abs_cost_to_cost().  Must round up.
711   */
712  static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
713  {
714  	return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
715  }
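
/*
 * Worked example: with hw_inuse at 25% of WEIGHT_ONE, abs_cost_to_cost()
 * scales an absolute cost of 1000 up to a cgroup-local cost of 4000, and
 * cost_to_abs_cost() maps that 4000 back down to 1000.
 */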
716  
717  static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
718  			    u64 abs_cost, u64 cost)
719  {
720  	struct iocg_pcpu_stat *gcs;
721  
722  	bio->bi_iocost_cost = cost;
723  	atomic64_add(cost, &iocg->vtime);
724  
725  	gcs = get_cpu_ptr(iocg->pcpu_stat);
726  	local64_add(abs_cost, &gcs->abs_vusage);
727  	put_cpu_ptr(gcs);
728  }
729  
730  static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
731  {
732  	if (lock_ioc) {
733  		spin_lock_irqsave(&iocg->ioc->lock, *flags);
734  		spin_lock(&iocg->waitq.lock);
735  	} else {
736  		spin_lock_irqsave(&iocg->waitq.lock, *flags);
737  	}
738  }
739  
740  static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
741  {
742  	if (unlock_ioc) {
743  		spin_unlock(&iocg->waitq.lock);
744  		spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
745  	} else {
746  		spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
747  	}
748  }
749  
750  #define CREATE_TRACE_POINTS
751  #include <trace/events/iocost.h>
752  
753  static void ioc_refresh_margins(struct ioc *ioc)
754  {
755  	struct ioc_margins *margins = &ioc->margins;
756  	u32 period_us = ioc->period_us;
757  	u64 vrate = ioc->vtime_base_rate;
758  
759  	margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
760  	margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
761  	margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
762  }
763  
764  /* latency QoS params changed, update period_us and all the dependent params */
765  static void ioc_refresh_period_us(struct ioc *ioc)
766  {
767  	u32 ppm, lat, multi, period_us;
768  
769  	lockdep_assert_held(&ioc->lock);
770  
771  	/* pick the higher latency target */
772  	if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
773  		ppm = ioc->params.qos[QOS_RPPM];
774  		lat = ioc->params.qos[QOS_RLAT];
775  	} else {
776  		ppm = ioc->params.qos[QOS_WPPM];
777  		lat = ioc->params.qos[QOS_WLAT];
778  	}
779  
780  	/*
781  	 * We want the period to be long enough to contain a healthy number
782  	 * of IOs while short enough for granular control.  Define it as a
783  	 * multiple of the latency target.  Ideally, the multiplier should
784  	 * be scaled according to the percentile so that it would nominally
785  	 * contain a certain number of requests.  Let's be simpler and
786  	 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
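	 *
	 * For example, with a 95th percentile target (ppm = 950000) and a
	 * 25ms latency target, multi = max((1000000 - 950000) / 50000, 2)
	 * = 2, giving a 50ms period.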
787  	 */
788  	if (ppm)
789  		multi = max_t(u32, (MILLION - ppm) / 50000, 2);
790  	else
791  		multi = 2;
792  	period_us = multi * lat;
793  	period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
794  
795  	/* calculate dependent params */
796  	ioc->period_us = period_us;
797  	ioc->timer_slack_ns = div64_u64(
798  		(u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
799  		100);
800  	ioc_refresh_margins(ioc);
801  }
802  
803  /*
804   *  ioc->rqos.disk isn't initialized when this function is called from
805   *  the init path.
806   */
807  static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk)
808  {
809  	int idx = ioc->autop_idx;
810  	const struct ioc_params *p = &autop[idx];
811  	u32 vrate_pct;
812  	u64 now_ns;
813  
814  	/* rotational? */
815  	if (!blk_queue_nonrot(disk->queue))
816  		return AUTOP_HDD;
817  
818  	/* handle SATA SSDs w/ broken NCQ */
819  	if (blk_queue_depth(disk->queue) == 1)
820  		return AUTOP_SSD_QD1;
821  
822  	/* use one of the normal ssd sets */
823  	if (idx < AUTOP_SSD_DFL)
824  		return AUTOP_SSD_DFL;
825  
826  	/* if user is overriding anything, maintain what was there */
827  	if (ioc->user_qos_params || ioc->user_cost_model)
828  		return idx;
829  
830  	/* step up/down based on the vrate */
831  	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
832  	now_ns = blk_time_get_ns();
833  
834  	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
835  		if (!ioc->autop_too_fast_at)
836  			ioc->autop_too_fast_at = now_ns;
837  		if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
838  			return idx + 1;
839  	} else {
840  		ioc->autop_too_fast_at = 0;
841  	}
842  
843  	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
844  		if (!ioc->autop_too_slow_at)
845  			ioc->autop_too_slow_at = now_ns;
846  		if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
847  			return idx - 1;
848  	} else {
849  		ioc->autop_too_slow_at = 0;
850  	}
851  
852  	return idx;
853  }
854  
855  /*
856   * Take the following as input
857   *
858   *  @bps	maximum sequential throughput
859   *  @seqiops	maximum sequential 4k iops
860   *  @randiops	maximum random 4k iops
861   *
862   * and calculate the linear model cost coefficients.
863   *
864   *  *@page	per-page cost		1s / (@bps / 4096)
865   *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
866   *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
867   */
868  static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
869  			u64 *page, u64 *seqio, u64 *randio)
870  {
871  	u64 v;
872  
873  	*page = *seqio = *randio = 0;
874  
875  	if (bps) {
876  		u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
877  
878  		if (bps_pages)
879  			*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
880  		else
881  			*page = 1;
882  	}
883  
884  	if (seqiops) {
885  		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
886  		if (v > *page)
887  			*seqio = v - *page;
888  	}
889  
890  	if (randiops) {
891  		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
892  		if (v > *page)
893  			*randio = v - *page;
894  	}
895  }
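
/*
 * Worked example using the AUTOP_SSD_DFL read coefficients: rbps =
 * 488636629 gives bps_pages = 119297 and a per-page cost of roughly
 * 1.15e6 vtime units (~8.4us of device time per 4k page), from which the
 * sequential and random base costs are then derived by subtraction.
 */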
896  
897  static void ioc_refresh_lcoefs(struct ioc *ioc)
898  {
899  	u64 *u = ioc->params.i_lcoefs;
900  	u64 *c = ioc->params.lcoefs;
901  
902  	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
903  		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
904  	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
905  		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
906  }
907  
908  /*
909   * struct gendisk is required as an argument because ioc->rqos.disk
910   * is not properly initialized when called from the init path.
911   */
912  static bool ioc_refresh_params_disk(struct ioc *ioc, bool force,
913  				    struct gendisk *disk)
914  {
915  	const struct ioc_params *p;
916  	int idx;
917  
918  	lockdep_assert_held(&ioc->lock);
919  
920  	idx = ioc_autop_idx(ioc, disk);
921  	p = &autop[idx];
922  
923  	if (idx == ioc->autop_idx && !force)
924  		return false;
925  
926  	if (idx != ioc->autop_idx) {
927  		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
928  		ioc->vtime_base_rate = VTIME_PER_USEC;
929  	}
930  
931  	ioc->autop_idx = idx;
932  	ioc->autop_too_fast_at = 0;
933  	ioc->autop_too_slow_at = 0;
934  
935  	if (!ioc->user_qos_params)
936  		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
937  	if (!ioc->user_cost_model)
938  		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
939  
940  	ioc_refresh_period_us(ioc);
941  	ioc_refresh_lcoefs(ioc);
942  
943  	ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
944  					    VTIME_PER_USEC, MILLION);
945  	ioc->vrate_max = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MAX] *
946  					    VTIME_PER_USEC, MILLION);
947  
948  	return true;
949  }
950  
951  static bool ioc_refresh_params(struct ioc *ioc, bool force)
952  {
953  	return ioc_refresh_params_disk(ioc, force, ioc->rqos.disk);
954  }
955  
956  /*
957   * When an iocg accumulates too much vtime or gets deactivated, we throw away
958   * some vtime, which lowers the overall device utilization. As the exact amount
959   * which is being thrown away is known, we can compensate by accelerating the
960   * vrate accordingly so that the extra vtime generated in the current period
961   * matches what got lost.
962   */
963  static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
964  {
965  	s64 pleft = ioc->period_at + ioc->period_us - now->now;
966  	s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
967  	s64 vcomp, vcomp_min, vcomp_max;
968  
969  	lockdep_assert_held(&ioc->lock);
970  
971  	/* we need some time left in this period */
972  	if (pleft <= 0)
973  		goto done;
974  
975  	/*
976  	 * Calculate how much vrate should be adjusted to offset the error.
977  	 * Limit the amount of adjustment and deduct the adjusted amount from
978  	 * the error.
979  	 */
980  	vcomp = -div64_s64(ioc->vtime_err, pleft);
981  	vcomp_min = -(ioc->vtime_base_rate >> 1);
982  	vcomp_max = ioc->vtime_base_rate;
983  	vcomp = clamp(vcomp, vcomp_min, vcomp_max);
984  
985  	ioc->vtime_err += vcomp * pleft;
986  
987  	atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
988  done:
989  	/* bound how much error can accumulate */
990  	ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
991  }
992  
993  static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
994  				  int nr_lagging, int nr_shortages,
995  				  int prev_busy_level, u32 *missed_ppm)
996  {
997  	u64 vrate = ioc->vtime_base_rate;
998  	u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
999  
1000  	if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
1001  		if (ioc->busy_level != prev_busy_level || nr_lagging)
1002  			trace_iocost_ioc_vrate_adj(ioc, vrate,
1003  						   missed_ppm, rq_wait_pct,
1004  						   nr_lagging, nr_shortages);
1005  
1006  		return;
1007  	}
1008  
1009  	/*
1010  	 * If vrate is out of bounds, apply clamp gradually as the
1011  	 * bounds can change abruptly.  Otherwise, apply busy_level
1012  	 * based adjustment.
1013  	 */
1014  	if (vrate < vrate_min) {
1015  		vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
1016  		vrate = min(vrate, vrate_min);
1017  	} else if (vrate > vrate_max) {
1018  		vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1019  		vrate = max(vrate, vrate_max);
1020  	} else {
1021  		int idx = min_t(int, abs(ioc->busy_level),
1022  				ARRAY_SIZE(vrate_adj_pct) - 1);
1023  		u32 adj_pct = vrate_adj_pct[idx];
1024  
1025  		if (ioc->busy_level > 0)
1026  			adj_pct = 100 - adj_pct;
1027  		else
1028  			adj_pct = 100 + adj_pct;
1029  
1030  		vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1031  			      vrate_min, vrate_max);
1032  	}
1033  
1034  	trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1035  				   nr_lagging, nr_shortages);
1036  
1037  	ioc->vtime_base_rate = vrate;
1038  	ioc_refresh_margins(ioc);
1039  }
1040  
1041  /* take a snapshot of the current [v]time and vrate */
1042  static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1043  {
1044  	unsigned seq;
1045  	u64 vrate;
1046  
1047  	now->now_ns = blk_time_get_ns();
1048  	now->now = ktime_to_us(now->now_ns);
1049  	vrate = atomic64_read(&ioc->vtime_rate);
1050  
1051  	/*
1052  	 * The current vtime is
1053  	 *
1054  	 *   vtime at period start + (wallclock time since the start) * vrate
1055  	 *
1056  	 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1057  	 * needed, they're seqcount protected.
1058  	 */
1059  	do {
1060  		seq = read_seqcount_begin(&ioc->period_seqcount);
1061  		now->vnow = ioc->period_at_vtime +
1062  			(now->now - ioc->period_at) * vrate;
1063  	} while (read_seqcount_retry(&ioc->period_seqcount, seq));
1064  }
1065  
1066  static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1067  {
1068  	WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1069  
1070  	write_seqcount_begin(&ioc->period_seqcount);
1071  	ioc->period_at = now->now;
1072  	ioc->period_at_vtime = now->vnow;
1073  	write_seqcount_end(&ioc->period_seqcount);
1074  
1075  	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1076  	add_timer(&ioc->timer);
1077  }
1078  
1079  /*
1080   * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1081   * weight sums and propagate upwards accordingly. If @save, the current margin
1082   * is saved to be used as reference for later inuse in-period adjustments.
1083   */
1084  static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1085  				bool save, struct ioc_now *now)
1086  {
1087  	struct ioc *ioc = iocg->ioc;
1088  	int lvl;
1089  
1090  	lockdep_assert_held(&ioc->lock);
1091  
1092  	/*
1093  	 * For an active leaf node, its inuse shouldn't be zero or exceed
1094  	 * @active. An active internal node's inuse is solely determined by the
1095  	 * inuse to active ratio of its children regardless of @inuse.
1096  	 */
1097  	if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1098  		inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1099  					   iocg->child_active_sum);
1100  	} else {
1101  		inuse = clamp_t(u32, inuse, 1, active);
1102  	}
1103  
1104  	iocg->last_inuse = iocg->inuse;
1105  	if (save)
1106  		iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1107  
1108  	if (active == iocg->active && inuse == iocg->inuse)
1109  		return;
1110  
1111  	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1112  		struct ioc_gq *parent = iocg->ancestors[lvl];
1113  		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1114  		u32 parent_active = 0, parent_inuse = 0;
1115  
1116  		/* update the level sums */
1117  		parent->child_active_sum += (s32)(active - child->active);
1118  		parent->child_inuse_sum += (s32)(inuse - child->inuse);
1119  		/* apply the updates */
1120  		child->active = active;
1121  		child->inuse = inuse;
1122  
1123  		/*
1124  		 * The delta between the inuse and active sums indicates how
1125  		 * much weight is being given away.  The parent's inuse and
1126  		 * active should reflect the ratio.
1127  		 */
1128  		if (parent->child_active_sum) {
1129  			parent_active = parent->weight;
1130  			parent_inuse = DIV64_U64_ROUND_UP(
1131  				parent_active * parent->child_inuse_sum,
1132  				parent->child_active_sum);
1133  		}
1134  
1135  		/* do we need to keep walking up? */
1136  		if (parent_active == parent->active &&
1137  		    parent_inuse == parent->inuse)
1138  			break;
1139  
1140  		active = parent_active;
1141  		inuse = parent_inuse;
1142  	}
1143  
1144  	ioc->weights_updated = true;
1145  }
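
/*
 * Worked example: consider the A0/A1 hierarchy from the comment at the
 * top of this file with all weights at 100.  If A0's inuse is lowered to
 * 25 while A1 stays at 100, A's child_inuse_sum / child_active_sum ratio
 * becomes 125/200, so A's own inuse is updated to 62.5% of its active
 * weight (rounded up), propagating the donation one level up.
 */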
1146  
1147  static void commit_weights(struct ioc *ioc)
1148  {
1149  	lockdep_assert_held(&ioc->lock);
1150  
1151  	if (ioc->weights_updated) {
1152  		/* paired with rmb in current_hweight(), see there */
1153  		smp_wmb();
1154  		atomic_inc(&ioc->hweight_gen);
1155  		ioc->weights_updated = false;
1156  	}
1157  }
1158  
1159  static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1160  			      bool save, struct ioc_now *now)
1161  {
1162  	__propagate_weights(iocg, active, inuse, save, now);
1163  	commit_weights(iocg->ioc);
1164  }
1165  
1166  static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1167  {
1168  	struct ioc *ioc = iocg->ioc;
1169  	int lvl;
1170  	u32 hwa, hwi;
1171  	int ioc_gen;
1172  
1173  	/* hot path - if uptodate, use cached */
1174  	ioc_gen = atomic_read(&ioc->hweight_gen);
1175  	if (ioc_gen == iocg->hweight_gen)
1176  		goto out;
1177  
1178  	/*
1179  	 * Paired with wmb in commit_weights(). If we saw the updated
1180  	 * hweight_gen, all the weight updates from __propagate_weights() are
1181  	 * visible too.
1182  	 *
1183  	 * We can race with weight updates during calculation and get it
1184  	 * wrong.  However, hweight_gen would have changed and a future
1185  	 * reader will recalculate and we're guaranteed to discard the
1186  	 * wrong result soon.
1187  	 */
1188  	smp_rmb();
1189  
1190  	hwa = hwi = WEIGHT_ONE;
1191  	for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1192  		struct ioc_gq *parent = iocg->ancestors[lvl];
1193  		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1194  		u64 active_sum = READ_ONCE(parent->child_active_sum);
1195  		u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1196  		u32 active = READ_ONCE(child->active);
1197  		u32 inuse = READ_ONCE(child->inuse);
1198  
1199  		/* we can race with deactivations and either may read as zero */
1200  		if (!active_sum || !inuse_sum)
1201  			continue;
1202  
1203  		active_sum = max_t(u64, active, active_sum);
1204  		hwa = div64_u64((u64)hwa * active, active_sum);
1205  
1206  		inuse_sum = max_t(u64, inuse, inuse_sum);
1207  		hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1208  	}
1209  
1210  	iocg->hweight_active = max_t(u32, hwa, 1);
1211  	iocg->hweight_inuse = max_t(u32, hwi, 1);
1212  	iocg->hweight_gen = ioc_gen;
1213  out:
1214  	if (hw_activep)
1215  		*hw_activep = iocg->hweight_active;
1216  	if (hw_inusep)
1217  		*hw_inusep = iocg->hweight_inuse;
1218  }
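
/*
 * Worked example: for A0 in the hierarchy from the comment at the top of
 * this file (A0 and A1 at weight 100 under A at 100, with B at 300 and
 * everyone active), the walk yields hweight_active =
 * WEIGHT_ONE * (100 / 400) * (100 / 200), i.e. 12.5% of WEIGHT_ONE,
 * matching the 12.5% share described there.
 */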
1219  
1220  /*
1221   * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1222   * other weights stay unchanged.
1223   */
1224  static u32 current_hweight_max(struct ioc_gq *iocg)
1225  {
1226  	u32 hwm = WEIGHT_ONE;
1227  	u32 inuse = iocg->active;
1228  	u64 child_inuse_sum;
1229  	int lvl;
1230  
1231  	lockdep_assert_held(&iocg->ioc->lock);
1232  
1233  	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1234  		struct ioc_gq *parent = iocg->ancestors[lvl];
1235  		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1236  
1237  		child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1238  		hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1239  		inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1240  					   parent->child_active_sum);
1241  	}
1242  
1243  	return max_t(u32, hwm, 1);
1244  }
1245  
1246  static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1247  {
1248  	struct ioc *ioc = iocg->ioc;
1249  	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1250  	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1251  	u32 weight;
1252  
1253  	lockdep_assert_held(&ioc->lock);
1254  
1255  	weight = iocg->cfg_weight ?: iocc->dfl_weight;
1256  	if (weight != iocg->weight && iocg->active)
1257  		propagate_weights(iocg, weight, iocg->inuse, true, now);
1258  	iocg->weight = weight;
1259  }
1260  
1261  static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1262  {
1263  	struct ioc *ioc = iocg->ioc;
1264  	u64 __maybe_unused last_period, cur_period;
1265  	u64 vtime, vtarget;
1266  	int i;
1267  
1268  	/*
1269  	 * If we seem to be already active, just update the stamp to tell the
1270  	 * timer that we're still active.  We don't mind occasional races.
1271  	 */
1272  	if (!list_empty(&iocg->active_list)) {
1273  		ioc_now(ioc, now);
1274  		cur_period = atomic64_read(&ioc->cur_period);
1275  		if (atomic64_read(&iocg->active_period) != cur_period)
1276  			atomic64_set(&iocg->active_period, cur_period);
1277  		return true;
1278  	}
1279  
1280  	/* racy check on internal node IOs, treat as root level IOs */
1281  	if (iocg->child_active_sum)
1282  		return false;
1283  
1284  	spin_lock_irq(&ioc->lock);
1285  
1286  	ioc_now(ioc, now);
1287  
1288  	/* update period */
1289  	cur_period = atomic64_read(&ioc->cur_period);
1290  	last_period = atomic64_read(&iocg->active_period);
1291  	atomic64_set(&iocg->active_period, cur_period);
1292  
1293  	/* already activated or breaking leaf-only constraint? */
1294  	if (!list_empty(&iocg->active_list))
1295  		goto succeed_unlock;
1296  	for (i = iocg->level - 1; i > 0; i--)
1297  		if (!list_empty(&iocg->ancestors[i]->active_list))
1298  			goto fail_unlock;
1299  
1300  	if (iocg->child_active_sum)
1301  		goto fail_unlock;
1302  
1303  	/*
1304  	 * Always start with the target budget. On deactivation, we throw away
1305  	 * anything above it.
1306  	 */
1307  	vtarget = now->vnow - ioc->margins.target;
1308  	vtime = atomic64_read(&iocg->vtime);
1309  
1310  	atomic64_add(vtarget - vtime, &iocg->vtime);
1311  	atomic64_add(vtarget - vtime, &iocg->done_vtime);
1312  	vtime = vtarget;
1313  
1314  	/*
1315  	 * Activate, propagate weight and start period timer if not
1316  	 * running.  Reset hweight_gen to avoid accidental match from
1317  	 * wrapping.
1318  	 */
1319  	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1320  	list_add(&iocg->active_list, &ioc->active_iocgs);
1321  
1322  	propagate_weights(iocg, iocg->weight,
1323  			  iocg->last_inuse ?: iocg->weight, true, now);
1324  
1325  	TRACE_IOCG_PATH(iocg_activate, iocg, now,
1326  			last_period, cur_period, vtime);
1327  
1328  	iocg->activated_at = now->now;
1329  
1330  	if (ioc->running == IOC_IDLE) {
1331  		ioc->running = IOC_RUNNING;
1332  		ioc->dfgv_period_at = now->now;
1333  		ioc->dfgv_period_rem = 0;
1334  		ioc_start_period(ioc, now);
1335  	}
1336  
1337  succeed_unlock:
1338  	spin_unlock_irq(&ioc->lock);
1339  	return true;
1340  
1341  fail_unlock:
1342  	spin_unlock_irq(&ioc->lock);
1343  	return false;
1344  }
1345  
1346  static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1347  {
1348  	struct ioc *ioc = iocg->ioc;
1349  	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1350  	u64 tdelta, delay, new_delay, shift;
1351  	s64 vover, vover_pct;
1352  	u32 hwa;
1353  
1354  	lockdep_assert_held(&iocg->waitq.lock);
1355  
1356  	/*
1357  	 * If the delay is set by another CPU, we may be in the past. No need to
1358  	 * change anything if so. This avoids decay calculation underflow.
1359  	 */
1360  	if (time_before64(now->now, iocg->delay_at))
1361  		return false;
1362  
1363  	/* calculate the current delay in effect - 1/2 every second */
1364  	tdelta = now->now - iocg->delay_at;
1365  	shift = div64_u64(tdelta, USEC_PER_SEC);
1366  	if (iocg->delay && shift < BITS_PER_LONG)
1367  		delay = iocg->delay >> shift;
1368  	else
1369  		delay = 0;
1370  
1371  	/* calculate the new delay from the debt amount */
1372  	current_hweight(iocg, &hwa, NULL);
1373  	vover = atomic64_read(&iocg->vtime) +
1374  		abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1375  	vover_pct = div64_s64(100 * vover,
1376  			      ioc->period_us * ioc->vtime_base_rate);
1377  
1378  	if (vover_pct <= MIN_DELAY_THR_PCT)
1379  		new_delay = 0;
1380  	else if (vover_pct >= MAX_DELAY_THR_PCT)
1381  		new_delay = MAX_DELAY;
1382  	else
1383  		new_delay = MIN_DELAY +
1384  			div_u64((MAX_DELAY - MIN_DELAY) *
1385  				(vover_pct - MIN_DELAY_THR_PCT),
1386  				MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1387  
1388  	/* pick the higher one and apply */
1389  	if (new_delay > delay) {
1390  		iocg->delay = new_delay;
1391  		iocg->delay_at = now->now;
1392  		delay = new_delay;
1393  	}
1394  
1395  	if (delay >= MIN_DELAY) {
1396  		if (!iocg->indelay_since)
1397  			iocg->indelay_since = now->now;
1398  		blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1399  		return true;
1400  	} else {
1401  		if (iocg->indelay_since) {
1402  			iocg->stat.indelay_us += now->now - iocg->indelay_since;
1403  			iocg->indelay_since = 0;
1404  		}
1405  		iocg->delay = 0;
1406  		blkcg_clear_delay(blkg);
1407  		return false;
1408  	}
1409  }
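
/*
 * Worked example of the interpolation above: a debt overage of 12750% of
 * one period's worth of vtime sits halfway between MIN_DELAY_THR_PCT and
 * MAX_DELAY_THR_PCT, so new_delay comes out roughly halfway between
 * MIN_DELAY and MAX_DELAY (~125ms) and then decays by half every second
 * once the debt stops growing.
 */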
1410  
1411  static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1412  			    struct ioc_now *now)
1413  {
1414  	struct iocg_pcpu_stat *gcs;
1415  
1416  	lockdep_assert_held(&iocg->ioc->lock);
1417  	lockdep_assert_held(&iocg->waitq.lock);
1418  	WARN_ON_ONCE(list_empty(&iocg->active_list));
1419  
1420  	/*
1421  	 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1422  	 * inuse, donating all of its share to others until its debt is paid off.
1423  	 */
1424  	if (!iocg->abs_vdebt && abs_cost) {
1425  		iocg->indebt_since = now->now;
1426  		propagate_weights(iocg, iocg->active, 0, false, now);
1427  	}
1428  
1429  	iocg->abs_vdebt += abs_cost;
1430  
1431  	gcs = get_cpu_ptr(iocg->pcpu_stat);
1432  	local64_add(abs_cost, &gcs->abs_vusage);
1433  	put_cpu_ptr(gcs);
1434  }
1435  
1436  static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1437  			  struct ioc_now *now)
1438  {
1439  	lockdep_assert_held(&iocg->ioc->lock);
1440  	lockdep_assert_held(&iocg->waitq.lock);
1441  
1442  	/*
1443  	 * make sure that nobody messed with @iocg. Check iocg->pd.online
1444  	 * to avoid a warning when removing a blkcg or disk.
1445  	 */
1446  	WARN_ON_ONCE(list_empty(&iocg->active_list) && iocg->pd.online);
1447  	WARN_ON_ONCE(iocg->inuse > 1);
1448  
1449  	iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1450  
1451  	/* if debt is paid in full, restore inuse */
1452  	if (!iocg->abs_vdebt) {
1453  		iocg->stat.indebt_us += now->now - iocg->indebt_since;
1454  		iocg->indebt_since = 0;
1455  
1456  		propagate_weights(iocg, iocg->active, iocg->last_inuse,
1457  				  false, now);
1458  	}
1459  }
1460  
1461  static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1462  			int flags, void *key)
1463  {
1464  	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1465  	struct iocg_wake_ctx *ctx = key;
1466  	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1467  
1468  	ctx->vbudget -= cost;
1469  
1470  	if (ctx->vbudget < 0)
1471  		return -1;
1472  
1473  	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1474  	wait->committed = true;
1475  
1476  	/*
1477  	 * autoremove_wake_function() removes the wait entry only when it
1478  	 * actually changed the task state. We want the wait always removed.
1479  	 * Remove explicitly and use default_wake_function(). Note that the
1480  	 * order of operations is important as finish_wait() tests whether
1481  	 * @wq_entry is removed without grabbing the lock.
1482  	 */
1483  	default_wake_function(wq_entry, mode, flags, key);
1484  	list_del_init_careful(&wq_entry->entry);
1485  	return 0;
1486  }
1487  
1488  /*
1489   * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1490   * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1491   * addition to iocg->waitq.lock.
1492   */
1493  static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1494  			    struct ioc_now *now)
1495  {
1496  	struct ioc *ioc = iocg->ioc;
1497  	struct iocg_wake_ctx ctx = { .iocg = iocg };
1498  	u64 vshortage, expires, oexpires;
1499  	s64 vbudget;
1500  	u32 hwa;
1501  
1502  	lockdep_assert_held(&iocg->waitq.lock);
1503  
1504  	current_hweight(iocg, &hwa, NULL);
1505  	vbudget = now->vnow - atomic64_read(&iocg->vtime);
1506  
1507  	/* pay off debt */
1508  	if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1509  		u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1510  		u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1511  		u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1512  
1513  		lockdep_assert_held(&ioc->lock);
1514  
1515  		atomic64_add(vpay, &iocg->vtime);
1516  		atomic64_add(vpay, &iocg->done_vtime);
1517  		iocg_pay_debt(iocg, abs_vpay, now);
1518  		vbudget -= vpay;
1519  	}
1520  
1521  	if (iocg->abs_vdebt || iocg->delay)
1522  		iocg_kick_delay(iocg, now);
1523  
1524  	/*
1525  	 * Debt can still be outstanding if we haven't paid all yet or the
1526  	 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1527  	 * under debt. Make sure @vbudget reflects the outstanding amount and is
1528  	 * not positive.
1529  	 */
1530  	if (iocg->abs_vdebt) {
1531  		s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1532  		vbudget = min_t(s64, 0, vbudget - vdebt);
1533  	}
1534  
1535  	/*
1536  	 * Wake up the ones which are due and see how much vtime we'll need for
1537  	 * the next one. As paying off debt restores hw_inuse, it must be read
1538  	 * after the above debt payment.
1539  	 */
1540  	ctx.vbudget = vbudget;
1541  	current_hweight(iocg, NULL, &ctx.hw_inuse);
1542  
1543  	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1544  
1545  	if (!waitqueue_active(&iocg->waitq)) {
1546  		if (iocg->wait_since) {
1547  			iocg->stat.wait_us += now->now - iocg->wait_since;
1548  			iocg->wait_since = 0;
1549  		}
1550  		return;
1551  	}
1552  
1553  	if (!iocg->wait_since)
1554  		iocg->wait_since = now->now;
1555  
1556  	if (WARN_ON_ONCE(ctx.vbudget >= 0))
1557  		return;
1558  
1559  	/* determine next wakeup, add a timer margin to guarantee chunking */
1560  	vshortage = -ctx.vbudget;
1561  	expires = now->now_ns +
1562  		DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1563  		NSEC_PER_USEC;
1564  	expires += ioc->timer_slack_ns;
1565  
1566  	/* if already active and close enough, don't bother */
1567  	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1568  	if (hrtimer_is_queued(&iocg->waitq_timer) &&
1569  	    abs(oexpires - expires) <= ioc->timer_slack_ns)
1570  		return;
1571  
1572  	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1573  			       ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1574  }
1575  
1576  static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1577  {
1578  	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1579  	bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1580  	struct ioc_now now;
1581  	unsigned long flags;
1582  
1583  	ioc_now(iocg->ioc, &now);
1584  
1585  	iocg_lock(iocg, pay_debt, &flags);
1586  	iocg_kick_waitq(iocg, pay_debt, &now);
1587  	iocg_unlock(iocg, pay_debt, &flags);
1588  
1589  	return HRTIMER_NORESTART;
1590  }
1591  
1592  static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1593  {
1594  	u32 nr_met[2] = { };
1595  	u32 nr_missed[2] = { };
1596  	u64 rq_wait_ns = 0;
1597  	int cpu, rw;
1598  
1599  	for_each_online_cpu(cpu) {
1600  		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1601  		u64 this_rq_wait_ns;
1602  
1603  		for (rw = READ; rw <= WRITE; rw++) {
1604  			u32 this_met = local_read(&stat->missed[rw].nr_met);
1605  			u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1606  
1607  			nr_met[rw] += this_met - stat->missed[rw].last_met;
1608  			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1609  			stat->missed[rw].last_met = this_met;
1610  			stat->missed[rw].last_missed = this_missed;
1611  		}
1612  
1613  		this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1614  		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1615  		stat->last_rq_wait_ns = this_rq_wait_ns;
1616  	}
1617  
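	/*
	 * Hedged example (hypothetical numbers): if a period saw 990 met and
	 * 10 missed reads, missed_ppm_ar[READ] below becomes
	 * 10 * MILLION / 1000 = 10000, i.e. 1% expressed in parts per million.
	 */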
1618  	for (rw = READ; rw <= WRITE; rw++) {
1619  		if (nr_met[rw] + nr_missed[rw])
1620  			missed_ppm_ar[rw] =
1621  				DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1622  						   nr_met[rw] + nr_missed[rw]);
1623  		else
1624  			missed_ppm_ar[rw] = 0;
1625  	}
1626  
1627  	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1628  				   ioc->period_us * NSEC_PER_USEC);
1629  }
1630  
1631  /* was iocg idle this period? */
1632  static bool iocg_is_idle(struct ioc_gq *iocg)
1633  {
1634  	struct ioc *ioc = iocg->ioc;
1635  
1636  	/* did something get issued this period? */
1637  	if (atomic64_read(&iocg->active_period) ==
1638  	    atomic64_read(&ioc->cur_period))
1639  		return false;
1640  
1641  	/* is something in flight? */
1642  	if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1643  		return false;
1644  
1645  	return true;
1646  }
1647  
1648  /*
1649   * Call this function on the target leaf @iocg's to build a pre-order traversal
1650   * list of all the ancestors in @inner_walk. The inner nodes are linked through
1651   * ->walk_list and the caller is responsible for dissolving the list after use.
1652   */
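/*
 * Sketch of the resulting order for a hypothetical hierarchy: with leaves A0
 * and B0 under root->A and root->B, calling this on A0 and then B0 links
 * root, A and then B into @inner_walk - each inner node appears once, and
 * parents are always listed before their children.
 */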
1653  static void iocg_build_inner_walk(struct ioc_gq *iocg,
1654  				  struct list_head *inner_walk)
1655  {
1656  	int lvl;
1657  
1658  	WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1659  
1660  	/* find the first ancestor which hasn't been visited yet */
1661  	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1662  		if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1663  			break;
1664  	}
1665  
1666  	/* walk down and visit the inner nodes to get pre-order traversal */
1667  	while (++lvl <= iocg->level - 1) {
1668  		struct ioc_gq *inner = iocg->ancestors[lvl];
1669  
1670  		/* record traversal order */
1671  		list_add_tail(&inner->walk_list, inner_walk);
1672  	}
1673  }
1674  
1675  /* propagate the deltas to the parent */
1676  static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1677  {
1678  	if (iocg->level > 0) {
1679  		struct iocg_stat *parent_stat =
1680  			&iocg->ancestors[iocg->level - 1]->stat;
1681  
1682  		parent_stat->usage_us +=
1683  			iocg->stat.usage_us - iocg->last_stat.usage_us;
1684  		parent_stat->wait_us +=
1685  			iocg->stat.wait_us - iocg->last_stat.wait_us;
1686  		parent_stat->indebt_us +=
1687  			iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1688  		parent_stat->indelay_us +=
1689  			iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1690  	}
1691  
1692  	iocg->last_stat = iocg->stat;
1693  }
1694  
1695  /* collect per-cpu counters and propagate the deltas to the parent */
1696  static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1697  {
1698  	struct ioc *ioc = iocg->ioc;
1699  	u64 abs_vusage = 0;
1700  	u64 vusage_delta;
1701  	int cpu;
1702  
1703  	lockdep_assert_held(&iocg->ioc->lock);
1704  
1705  	/* collect per-cpu counters */
1706  	for_each_possible_cpu(cpu) {
1707  		abs_vusage += local64_read(
1708  				per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1709  	}
1710  	vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1711  	iocg->last_stat_abs_vusage = abs_vusage;
1712  
1713  	iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1714  	iocg->stat.usage_us += iocg->usage_delta_us;
1715  
1716  	iocg_flush_stat_upward(iocg);
1717  }
1718  
1719  /* get stat counters ready for reading on all active iocgs */
1720  static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1721  {
1722  	LIST_HEAD(inner_walk);
1723  	struct ioc_gq *iocg, *tiocg;
1724  
1725  	/* flush leaves and build inner node walk list */
1726  	list_for_each_entry(iocg, target_iocgs, active_list) {
1727  		iocg_flush_stat_leaf(iocg, now);
1728  		iocg_build_inner_walk(iocg, &inner_walk);
1729  	}
1730  
1731  	/* keep flushing upwards by walking the inner list backwards */
1732  	list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1733  		iocg_flush_stat_upward(iocg);
1734  		list_del_init(&iocg->walk_list);
1735  	}
1736  }
1737  
1738  /*
1739   * Determine what @iocg's hweight_inuse should be after donating unused
1740   * capacity. @hwm is the upper bound and used to signal no donation. This
1741   * function also throws away @iocg's excess budget.
1742   */
1743  static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1744  				  u32 usage, struct ioc_now *now)
1745  {
1746  	struct ioc *ioc = iocg->ioc;
1747  	u64 vtime = atomic64_read(&iocg->vtime);
1748  	s64 excess, delta, target, new_hwi;
1749  
1750  	/* debt handling owns inuse for debtors */
1751  	if (iocg->abs_vdebt)
1752  		return 1;
1753  
1754  	/* see whether minimum margin requirement is met */
1755  	if (waitqueue_active(&iocg->waitq) ||
1756  	    time_after64(vtime, now->vnow - ioc->margins.min))
1757  		return hwm;
1758  
1759  	/* throw away excess above target */
1760  	excess = now->vnow - vtime - ioc->margins.target;
1761  	if (excess > 0) {
1762  		atomic64_add(excess, &iocg->vtime);
1763  		atomic64_add(excess, &iocg->done_vtime);
1764  		vtime += excess;
1765  		ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1766  	}
1767  
1768  	/*
1769  	 * Let's say the distance between iocg's and device's vtimes as a
1770  	 * fraction of period duration is delta. Assuming that the iocg will
1771  	 * consume the usage determined above, we want to determine new_hwi so
1772  	 * that delta equals MARGIN_TARGET at the end of the next period.
1773  	 *
1774  	 * We need to execute usage worth of IOs while spending the sum of the
1775  	 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1776  	 * (delta):
1777  	 *
1778  	 *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
1779  	 *
1780  	 * Therefore, the new_hwi is:
1781  	 *
1782  	 *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
1783  	 */
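	/*
	 * Worked example with made-up numbers: if usage is 0.3 of WEIGHT_ONE,
	 * MARGIN_TARGET is 0.5 and delta is 0.25, then
	 * new_hwi = 0.3 / (1 - 0.5 + 0.25) = 0.4, i.e. inuse is shrunk so
	 * that the iocg ends the next period at the target margin.
	 */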
1784  	delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1785  			  now->vnow - ioc->period_at_vtime);
1786  	target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1787  	new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1788  
1789  	return clamp_t(s64, new_hwi, 1, hwm);
1790  }
1791  
1792  /*
1793   * For work-conservation, an iocg which isn't using all of its share should
1794   * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1795   * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1796   *
1797   * #1 is mathematically simpler but has the drawback of requiring synchronous
1798   * global hweight_inuse updates when idle iocg's get activated or inuse weights
1799   * change due to donation snapbacks as it has the possibility of grossly
1800   * overshooting what's allowed by the model and vrate.
1801   *
1802   * #2 is inherently safe with local operations. The donating iocg can easily
1803   * snap back to higher weights when needed without worrying about impacts on
1804   * other nodes as the impacts will be inherently correct. This also makes idle
1805   * iocg activations safe. The only effect activations have is decreasing
1806   * hweight_inuse of others, the right solution to which is for those iocgs to
1807   * snap back to higher weights.
1808   *
1809   * So, we go with #2. The challenge is calculating how each donating iocg's
1810   * inuse should be adjusted to achieve the target donation amounts. This is done
1811   * using Andy's method described in the following pdf.
1812   *
1813   *   https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1814   *
1815   * Given the weights and target after-donation hweight_inuse values, Andy's
1816   * method determines how the proportional distribution should look like at each
1817   * sibling level to maintain the relative relationship between all non-donating
1818   * pairs. To roughly summarize, it divides the tree into donating and
1819   * non-donating parts, calculates global donation rate which is used to
1820   * determine the target hweight_inuse for each node, and then derives per-level
1821   * proportions.
1822   *
1823   * The following pdf shows that global distribution calculated this way can be
1824   * achieved by scaling inuse weights of donating leaves and propagating the
1825   * adjustments upwards proportionally.
1826   *
1827   *   https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1828   *
1829   * Combining the above two, we can determine how each leaf iocg's inuse should
1830   * be adjusted to achieve the target donation.
1831   *
1832   *   https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1833   *
1834   * The inline comments use symbols from the last pdf.
1835   *
1836   *   b is the sum of the absolute budgets in the subtree. 1 for the root node.
1837   *   f is the sum of the absolute budgets of non-donating nodes in the subtree.
1838   *   t is the sum of the absolute budgets of donating nodes in the subtree.
1839   *   w is the weight of the node. w = w_f + w_t
1840   *   w_f is the non-donating portion of w. w_f = w * f / b
1841   *   w_t is the donating portion of w. w_t = w * t / b
1842   *   s is the sum of all sibling weights. s = Sum(w) for siblings
1843   *   s_f and s_t are the non-donating and donating portions of s.
1844   *
1845   * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1846   * w_pt is the donating portion of the parent's weight and w'_pt the same value
1847   * after adjustments. Subscript r denotes the root node's values.
1848   */
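/*
 * A small worked split of the symbols above (hypothetical numbers): a node
 * with w = 200 whose subtree budget is 40% donating has w_t = w * t / b = 80
 * and w_f = 120; s_f and s_t are simply those portions summed across all
 * siblings.
 */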
1849  static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1850  {
1851  	LIST_HEAD(over_hwa);
1852  	LIST_HEAD(inner_walk);
1853  	struct ioc_gq *iocg, *tiocg, *root_iocg;
1854  	u32 after_sum, over_sum, over_target, gamma;
1855  
1856  	/*
1857  	 * It's pretty unlikely but possible for the total sum of
1858  	 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1859  	 * confuse the following calculations. If such a condition is detected,
1860  	 * scale down everyone over its full share equally to keep the sum below
1861  	 * WEIGHT_ONE.
1862  	 */
1863  	after_sum = 0;
1864  	over_sum = 0;
1865  	list_for_each_entry(iocg, surpluses, surplus_list) {
1866  		u32 hwa;
1867  
1868  		current_hweight(iocg, &hwa, NULL);
1869  		after_sum += iocg->hweight_after_donation;
1870  
1871  		if (iocg->hweight_after_donation > hwa) {
1872  			over_sum += iocg->hweight_after_donation;
1873  			list_add(&iocg->walk_list, &over_hwa);
1874  		}
1875  	}
1876  
1877  	if (after_sum >= WEIGHT_ONE) {
1878  		/*
1879  		 * The delta should be deducted from the over_sum, calculate
1880  		 * target over_sum value.
1881  		 */
1882  		u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1883  		WARN_ON_ONCE(over_sum <= over_delta);
1884  		over_target = over_sum - over_delta;
1885  	} else {
1886  		over_target = 0;
1887  	}
1888  
1889  	list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1890  		if (over_target)
1891  			iocg->hweight_after_donation =
1892  				div_u64((u64)iocg->hweight_after_donation *
1893  					over_target, over_sum);
1894  		list_del_init(&iocg->walk_list);
1895  	}
1896  
1897  	/*
1898  	 * Build pre-order inner node walk list and prepare for donation
1899  	 * adjustment calculations.
1900  	 */
1901  	list_for_each_entry(iocg, surpluses, surplus_list) {
1902  		iocg_build_inner_walk(iocg, &inner_walk);
1903  	}
1904  
1905  	root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1906  	WARN_ON_ONCE(root_iocg->level > 0);
1907  
1908  	list_for_each_entry(iocg, &inner_walk, walk_list) {
1909  		iocg->child_adjusted_sum = 0;
1910  		iocg->hweight_donating = 0;
1911  		iocg->hweight_after_donation = 0;
1912  	}
1913  
1914  	/*
1915  	 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1916  	 * up the hierarchy.
1917  	 */
1918  	list_for_each_entry(iocg, surpluses, surplus_list) {
1919  		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1920  
1921  		parent->hweight_donating += iocg->hweight_donating;
1922  		parent->hweight_after_donation += iocg->hweight_after_donation;
1923  	}
1924  
1925  	list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1926  		if (iocg->level > 0) {
1927  			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1928  
1929  			parent->hweight_donating += iocg->hweight_donating;
1930  			parent->hweight_after_donation += iocg->hweight_after_donation;
1931  		}
1932  	}
1933  
1934  	/*
1935  	 * Calculate inner hwa's (b) and make sure the donation values are
1936  	 * within the accepted ranges as we're doing low res calculations with
1937  	 * roundups.
1938  	 */
1939  	list_for_each_entry(iocg, &inner_walk, walk_list) {
1940  		if (iocg->level) {
1941  			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1942  
1943  			iocg->hweight_active = DIV64_U64_ROUND_UP(
1944  				(u64)parent->hweight_active * iocg->active,
1945  				parent->child_active_sum);
1946  
1947  		}
1948  
1949  		iocg->hweight_donating = min(iocg->hweight_donating,
1950  					     iocg->hweight_active);
1951  		iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1952  						   iocg->hweight_donating - 1);
1953  		if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1954  				 iocg->hweight_donating <= 1 ||
1955  				 iocg->hweight_after_donation == 0)) {
1956  			pr_warn("iocg: invalid donation weights in ");
1957  			pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1958  			pr_cont(": active=%u donating=%u after=%u\n",
1959  				iocg->hweight_active, iocg->hweight_donating,
1960  				iocg->hweight_after_donation);
1961  		}
1962  	}
1963  
1964  	/*
1965  	 * Calculate the global donation rate (gamma) - the rate to adjust
1966  	 * non-donating budgets by.
1967  	 *
1968  	 * No need to use 64bit multiplication here as the first operand is
1969  	 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1970  	 *
1971  	 * We know that there are beneficiary nodes and the sum of the donating
1972  	 * hweights can't be whole; however, due to the round-ups during hweight
1973  	 * calculations, root_iocg->hweight_donating might still end up equal to
1974  	 * or greater than whole. Limit the range when calculating the divider.
1975  	 *
1976  	 * gamma = (1 - t_r') / (1 - t_r)
1977  	 */
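	/*
	 * Plugging in example values (not from the source): if the root's
	 * donating hweight is 0.4 and its after-donation hweight is 0.2,
	 * gamma = (1 - 0.2) / (1 - 0.4) = 4/3, stored below as a
	 * WEIGHT_ONE-scaled fixed point number.
	 */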
1978  	gamma = DIV_ROUND_UP(
1979  		(WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1980  		WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1981  
1982  	/*
1983  	 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1984  	 * nodes.
1985  	 */
1986  	list_for_each_entry(iocg, &inner_walk, walk_list) {
1987  		struct ioc_gq *parent;
1988  		u32 inuse, wpt, wptp;
1989  		u64 st, sf;
1990  
1991  		if (iocg->level == 0) {
1992  			/* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1993  			iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1994  				iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1995  				WEIGHT_ONE - iocg->hweight_after_donation);
1996  			continue;
1997  		}
1998  
1999  		parent = iocg->ancestors[iocg->level - 1];
2000  
2001  		/* b' = gamma * b_f + b_t' */
2002  		iocg->hweight_inuse = DIV64_U64_ROUND_UP(
2003  			(u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
2004  			WEIGHT_ONE) + iocg->hweight_after_donation;
2005  
2006  		/* w' = s' * b' / b'_p */
2007  		inuse = DIV64_U64_ROUND_UP(
2008  			(u64)parent->child_adjusted_sum * iocg->hweight_inuse,
2009  			parent->hweight_inuse);
2010  
2011  		/* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
2012  		st = DIV64_U64_ROUND_UP(
2013  			iocg->child_active_sum * iocg->hweight_donating,
2014  			iocg->hweight_active);
2015  		sf = iocg->child_active_sum - st;
2016  		wpt = DIV64_U64_ROUND_UP(
2017  			(u64)iocg->active * iocg->hweight_donating,
2018  			iocg->hweight_active);
2019  		wptp = DIV64_U64_ROUND_UP(
2020  			(u64)inuse * iocg->hweight_after_donation,
2021  			iocg->hweight_inuse);
2022  
2023  		iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
2024  	}
2025  
2026  	/*
2027  	 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
2028  	 * we can finally determine leaf adjustments.
2029  	 */
2030  	list_for_each_entry(iocg, surpluses, surplus_list) {
2031  		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2032  		u32 inuse;
2033  
2034  		/*
2035  		 * In-debt iocgs participated in the donation calculation with
2036  		 * the minimum target hweight_inuse. Configuring inuse
2037  		 * accordingly would work fine but debt handling expects
2038  		 * @iocg->inuse to stay at the minimum and we don't want to
2039  		 * interfere.
2040  		 */
2041  		if (iocg->abs_vdebt) {
2042  			WARN_ON_ONCE(iocg->inuse > 1);
2043  			continue;
2044  		}
2045  
2046  		/* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2047  		inuse = DIV64_U64_ROUND_UP(
2048  			parent->child_adjusted_sum * iocg->hweight_after_donation,
2049  			parent->hweight_inuse);
2050  
2051  		TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2052  				iocg->inuse, inuse,
2053  				iocg->hweight_inuse,
2054  				iocg->hweight_after_donation);
2055  
2056  		__propagate_weights(iocg, iocg->active, inuse, true, now);
2057  	}
2058  
2059  	/* walk list should be dissolved after use */
2060  	list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2061  		list_del_init(&iocg->walk_list);
2062  }
2063  
2064  /*
2065   * A low weight iocg can amass a large amount of debt, for example, when
2066   * anonymous memory gets reclaimed aggressively. If the system has a lot of
2067   * memory paired with a slow IO device, the debt can span multiple seconds or
2068   * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2069   * up blocked paying its debt while the IO device is idle.
2070   *
2071   * The following protects against such cases. If the device has been
2072   * sufficiently idle for a while, the debts are halved and delays are
2073   * recalculated.
2074   */
2075  static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2076  			      struct ioc_now *now)
2077  {
2078  	struct ioc_gq *iocg;
2079  	u64 dur, usage_pct, nr_cycles, nr_cycles_shift;
2080  
2081  	/* if no debtor, reset the cycle */
2082  	if (!nr_debtors) {
2083  		ioc->dfgv_period_at = now->now;
2084  		ioc->dfgv_period_rem = 0;
2085  		ioc->dfgv_usage_us_sum = 0;
2086  		return;
2087  	}
2088  
2089  	/*
2090  	 * Debtors can pass through a lot of writes choking the device and we
2091  	 * don't want to be forgiving debts while the device is struggling from
2092  	 * write bursts. If we're missing latency targets, consider the device
2093  	 * fully utilized.
2094  	 */
2095  	if (ioc->busy_level > 0)
2096  		usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2097  
2098  	ioc->dfgv_usage_us_sum += usage_us_sum;
2099  	if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2100  		return;
2101  
2102  	/*
2103  	 * At least DFGV_PERIOD has passed since the last period. Calculate the
2104  	 * average usage and reset the period counters.
2105  	 */
2106  	dur = now->now - ioc->dfgv_period_at;
2107  	usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2108  
2109  	ioc->dfgv_period_at = now->now;
2110  	ioc->dfgv_usage_us_sum = 0;
2111  
2112  	/* if was too busy, reset everything */
2113  	if (usage_pct > DFGV_USAGE_PCT) {
2114  		ioc->dfgv_period_rem = 0;
2115  		return;
2116  	}
2117  
2118  	/*
2119  	 * Usage is lower than threshold. Let's forgive some debts. Debt
2120  	 * forgiveness runs off of the usual ioc timer but its period usually
2121  	 * doesn't match ioc's. Compensate the difference by performing the
2122  	 * reduction as many times as would fit in the duration since the last
2123  	 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2124  	 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2125  	 * reductions is doubled.
2126  	 */
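	/*
	 * Example with assumed numbers: if dur + dfgv_period_rem equals
	 * 2.5 * DFGV_PERIOD, nr_cycles becomes 2 (debt and delay are shifted
	 * right by two, i.e. quartered) and the remaining 0.5 * DFGV_PERIOD is
	 * carried over in dfgv_period_rem for the next run.
	 */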
2127  	nr_cycles = dur + ioc->dfgv_period_rem;
2128  	ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2129  
2130  	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2131  		u64 __maybe_unused old_debt, __maybe_unused old_delay;
2132  
2133  		if (!iocg->abs_vdebt && !iocg->delay)
2134  			continue;
2135  
2136  		spin_lock(&iocg->waitq.lock);
2137  
2138  		old_debt = iocg->abs_vdebt;
2139  		old_delay = iocg->delay;
2140  
2141  		nr_cycles_shift = min_t(u64, nr_cycles, BITS_PER_LONG - 1);
2142  		if (iocg->abs_vdebt)
2143  			iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles_shift ?: 1;
2144  
2145  		if (iocg->delay)
2146  			iocg->delay = iocg->delay >> nr_cycles_shift ?: 1;
2147  
2148  		iocg_kick_waitq(iocg, true, now);
2149  
2150  		TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2151  				old_debt, iocg->abs_vdebt,
2152  				old_delay, iocg->delay);
2153  
2154  		spin_unlock(&iocg->waitq.lock);
2155  	}
2156  }
2157  
2158  /*
2159   * Check the active iocgs' state to avoid oversleeping and deactivate
2160   * idle iocgs.
2161   *
2162   * Since waiters determine the sleep durations based on the vrate
2163   * they saw at the time of sleep, if vrate has increased, some
2164   * waiters could be sleeping for too long. Wake up tardy waiters
2165   * which should have woken up in the last period and expire idle
2166   * iocgs.
2167   */
2168  static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2169  {
2170  	int nr_debtors = 0;
2171  	struct ioc_gq *iocg, *tiocg;
2172  
2173  	list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2174  		if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2175  		    !iocg->delay && !iocg_is_idle(iocg))
2176  			continue;
2177  
2178  		spin_lock(&iocg->waitq.lock);
2179  
2180  		/* flush wait and indebt stat deltas */
2181  		if (iocg->wait_since) {
2182  			iocg->stat.wait_us += now->now - iocg->wait_since;
2183  			iocg->wait_since = now->now;
2184  		}
2185  		if (iocg->indebt_since) {
2186  			iocg->stat.indebt_us +=
2187  				now->now - iocg->indebt_since;
2188  			iocg->indebt_since = now->now;
2189  		}
2190  		if (iocg->indelay_since) {
2191  			iocg->stat.indelay_us +=
2192  				now->now - iocg->indelay_since;
2193  			iocg->indelay_since = now->now;
2194  		}
2195  
2196  		if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2197  		    iocg->delay) {
2198  			/* might be oversleeping vtime / hweight changes, kick */
2199  			iocg_kick_waitq(iocg, true, now);
2200  			if (iocg->abs_vdebt || iocg->delay)
2201  				nr_debtors++;
2202  		} else if (iocg_is_idle(iocg)) {
2203  			/* no waiter and idle, deactivate */
2204  			u64 vtime = atomic64_read(&iocg->vtime);
2205  			s64 excess;
2206  
2207  			/*
2208  			 * @iocg has been inactive for a full duration and will
2209  			 * have a high budget. Account anything above target as
2210  			 * error and throw away. On reactivation, it'll start
2211  			 * with the target budget.
2212  			 */
2213  			excess = now->vnow - vtime - ioc->margins.target;
2214  			if (excess > 0) {
2215  				u32 old_hwi;
2216  
2217  				current_hweight(iocg, NULL, &old_hwi);
2218  				ioc->vtime_err -= div64_u64(excess * old_hwi,
2219  							    WEIGHT_ONE);
2220  			}
2221  
2222  			TRACE_IOCG_PATH(iocg_idle, iocg, now,
2223  					atomic64_read(&iocg->active_period),
2224  					atomic64_read(&ioc->cur_period), vtime);
2225  			__propagate_weights(iocg, 0, 0, false, now);
2226  			list_del_init(&iocg->active_list);
2227  		}
2228  
2229  		spin_unlock(&iocg->waitq.lock);
2230  	}
2231  
2232  	commit_weights(ioc);
2233  	return nr_debtors;
2234  }
2235  
2236  static void ioc_timer_fn(struct timer_list *timer)
2237  {
2238  	struct ioc *ioc = container_of(timer, struct ioc, timer);
2239  	struct ioc_gq *iocg, *tiocg;
2240  	struct ioc_now now;
2241  	LIST_HEAD(surpluses);
2242  	int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2243  	u64 usage_us_sum = 0;
2244  	u32 ppm_rthr;
2245  	u32 ppm_wthr;
2246  	u32 missed_ppm[2], rq_wait_pct;
2247  	u64 period_vtime;
2248  	int prev_busy_level;
2249  
2250  	/* how were the latencies during the period? */
2251  	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2252  
2253  	/* take care of active iocgs */
2254  	spin_lock_irq(&ioc->lock);
2255  
2256  	ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2257  	ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2258  	ioc_now(ioc, &now);
2259  
2260  	period_vtime = now.vnow - ioc->period_at_vtime;
2261  	if (WARN_ON_ONCE(!period_vtime)) {
2262  		spin_unlock_irq(&ioc->lock);
2263  		return;
2264  	}
2265  
2266  	nr_debtors = ioc_check_iocgs(ioc, &now);
2267  
2268  	/*
2269  	 * Wait and indebt stat are flushed above and the donation calculation
2270  	 * below needs updated usage stat. Let's bring stat up-to-date.
2271  	 */
2272  	iocg_flush_stat(&ioc->active_iocgs, &now);
2273  
2274  	/* calc usage and see whether some weights need to be moved around */
2275  	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2276  		u64 vdone, vtime, usage_us;
2277  		u32 hw_active, hw_inuse;
2278  
2279  		/*
2280  		 * Collect unused and wind vtime closer to vnow to prevent
2281  		 * iocgs from accumulating a large amount of budget.
2282  		 */
2283  		vdone = atomic64_read(&iocg->done_vtime);
2284  		vtime = atomic64_read(&iocg->vtime);
2285  		current_hweight(iocg, &hw_active, &hw_inuse);
2286  
2287  		/*
2288  		 * Latency QoS detection doesn't account for IOs which are
2289  		 * in-flight for longer than a period.  Detect them by
2290  		 * comparing vdone against period start.  If lagging behind
2291  		 * IOs from past periods, don't increase vrate.
2292  		 */
2293  		if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2294  		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2295  		    time_after64(vtime, vdone) &&
2296  		    time_after64(vtime, now.vnow -
2297  				 MAX_LAGGING_PERIODS * period_vtime) &&
2298  		    time_before64(vdone, now.vnow - period_vtime))
2299  			nr_lagging++;
2300  
2301  		/*
2302  		 * Determine absolute usage factoring in in-flight IOs to avoid
2303  		 * high-latency completions appearing as idle.
2304  		 */
2305  		usage_us = iocg->usage_delta_us;
2306  		usage_us_sum += usage_us;
2307  
2308  		/* see whether there's surplus vtime */
2309  		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2310  		if (hw_inuse < hw_active ||
2311  		    (!waitqueue_active(&iocg->waitq) &&
2312  		     time_before64(vtime, now.vnow - ioc->margins.low))) {
2313  			u32 hwa, old_hwi, hwm, new_hwi, usage;
2314  			u64 usage_dur;
2315  
2316  			if (vdone != vtime) {
2317  				u64 inflight_us = DIV64_U64_ROUND_UP(
2318  					cost_to_abs_cost(vtime - vdone, hw_inuse),
2319  					ioc->vtime_base_rate);
2320  
2321  				usage_us = max(usage_us, inflight_us);
2322  			}
2323  
2324  			/* convert to hweight based usage ratio */
2325  			if (time_after64(iocg->activated_at, ioc->period_at))
2326  				usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2327  			else
2328  				usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2329  
2330  			usage = clamp_t(u32,
2331  				DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2332  						   usage_dur),
2333  				1, WEIGHT_ONE);
2334  
2335  			/*
2336  			 * Already donating or accumulated enough to start.
2337  			 * Determine the donation amount.
2338  			 */
2339  			current_hweight(iocg, &hwa, &old_hwi);
2340  			hwm = current_hweight_max(iocg);
2341  			new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2342  							 usage, &now);
2343  			/*
2344  			 * Donation calculation assumes hweight_after_donation
2345  			 * to be positive, a condition that a donor w/ hwa < 2
2346  			 * can't meet. Don't bother with donation if hwa is
2347  			 * below 2. It's not going to make a meaningful difference
2348  			 * anyway.
2349  			 */
2350  			if (new_hwi < hwm && hwa >= 2) {
2351  				iocg->hweight_donating = hwa;
2352  				iocg->hweight_after_donation = new_hwi;
2353  				list_add(&iocg->surplus_list, &surpluses);
2354  			} else if (!iocg->abs_vdebt) {
2355  				/*
2356  				 * @iocg doesn't have enough to donate. Reset
2357  				 * its inuse to active.
2358  				 *
2359  				 * Don't reset debtors as their inuse's are
2360  				 * owned by debt handling. This shouldn't affect
2361  				 * donation calculation in any meaningful way
2362  				 * as @iocg doesn't have a meaningful amount of
2363  				 * share anyway.
2364  				 */
2365  				TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2366  						iocg->inuse, iocg->active,
2367  						iocg->hweight_inuse, new_hwi);
2368  
2369  				__propagate_weights(iocg, iocg->active,
2370  						    iocg->active, true, &now);
2371  				nr_shortages++;
2372  			}
2373  		} else {
2374  			/* genuinely short on vtime */
2375  			nr_shortages++;
2376  		}
2377  	}
2378  
2379  	if (!list_empty(&surpluses) && nr_shortages)
2380  		transfer_surpluses(&surpluses, &now);
2381  
2382  	commit_weights(ioc);
2383  
2384  	/* surplus list should be dissolved after use */
2385  	list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2386  		list_del_init(&iocg->surplus_list);
2387  
2388  	/*
2389  	 * If q is getting clogged or we're missing too much, we're issuing
2390  	 * too much IO and should lower vtime rate.  If we're not missing
2391  	 * and experiencing shortages but not surpluses, we're too stingy
2392  	 * and should increase vtime rate.
2393  	 */
2394  	prev_busy_level = ioc->busy_level;
2395  	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2396  	    missed_ppm[READ] > ppm_rthr ||
2397  	    missed_ppm[WRITE] > ppm_wthr) {
2398  		/* clearly missing QoS targets, slow down vrate */
2399  		ioc->busy_level = max(ioc->busy_level, 0);
2400  		ioc->busy_level++;
2401  	} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2402  		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2403  		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2404  		/* QoS targets are being met with >25% margin */
2405  		if (nr_shortages) {
2406  			/*
2407  			 * We're throttling while the device has spare
2408  			 * capacity.  If vrate was being slowed down, stop.
2409  			 */
2410  			ioc->busy_level = min(ioc->busy_level, 0);
2411  
2412  			/*
2413  			 * If there are IOs spanning multiple periods, wait
2414  			 * them out before pushing the device harder.
2415  			 */
2416  			if (!nr_lagging)
2417  				ioc->busy_level--;
2418  		} else {
2419  			/*
2420  			 * Nobody is being throttled and the users aren't
2421  			 * issuing enough IOs to saturate the device.  We
2422  			 * simply don't know how close the device is to
2423  			 * saturation.  Coast.
2424  			 */
2425  			ioc->busy_level = 0;
2426  		}
2427  	} else {
2428  		/* inside the hysteresis margin, we're good */
2429  		ioc->busy_level = 0;
2430  	}
2431  
2432  	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2433  
2434  	ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2435  			      prev_busy_level, missed_ppm);
2436  
2437  	ioc_refresh_params(ioc, false);
2438  
2439  	ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2440  
2441  	/*
2442  	 * This period is done.  Move onto the next one.  If nothing's
2443  	 * going on with the device, stop the timer.
2444  	 */
2445  	atomic64_inc(&ioc->cur_period);
2446  
2447  	if (ioc->running != IOC_STOP) {
2448  		if (!list_empty(&ioc->active_iocgs)) {
2449  			ioc_start_period(ioc, &now);
2450  		} else {
2451  			ioc->busy_level = 0;
2452  			ioc->vtime_err = 0;
2453  			ioc->running = IOC_IDLE;
2454  		}
2455  
2456  		ioc_refresh_vrate(ioc, &now);
2457  	}
2458  
2459  	spin_unlock_irq(&ioc->lock);
2460  }
2461  
2462  static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2463  				      u64 abs_cost, struct ioc_now *now)
2464  {
2465  	struct ioc *ioc = iocg->ioc;
2466  	struct ioc_margins *margins = &ioc->margins;
2467  	u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2468  	u32 hwi, adj_step;
2469  	s64 margin;
2470  	u64 cost, new_inuse;
2471  	unsigned long flags;
2472  
2473  	current_hweight(iocg, NULL, &hwi);
2474  	old_hwi = hwi;
2475  	cost = abs_cost_to_cost(abs_cost, hwi);
2476  	margin = now->vnow - vtime - cost;
2477  
2478  	/* debt handling owns inuse for debtors */
2479  	if (iocg->abs_vdebt)
2480  		return cost;
2481  
2482  	/*
2483  	 * We only increase inuse during period and do so if the margin has
2484  	 * deteriorated since the previous adjustment.
2485  	 */
2486  	if (margin >= iocg->saved_margin || margin >= margins->low ||
2487  	    iocg->inuse == iocg->active)
2488  		return cost;
2489  
2490  	spin_lock_irqsave(&ioc->lock, flags);
2491  
2492  	/* we own inuse only when @iocg is in the normal active state */
2493  	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2494  		spin_unlock_irqrestore(&ioc->lock, flags);
2495  		return cost;
2496  	}
2497  
2498  	/*
2499  	 * Bump up inuse till @abs_cost fits in the existing budget.
2500  	 * adj_step must be determined after acquiring ioc->lock - we might
2501  	 * have raced and lost to another thread for activation and could
2502  	 * be reading 0 iocg->active before ioc->lock which will lead to an
2503  	 * infinite loop.
2504  	 */
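	/*
	 * Rough illustration (hypothetical values): with active = 1000 and an
	 * adjustment step of 10%, adj_step is 100, so each iteration of the
	 * loop below raises inuse by 100 until the cost fits in the budget or
	 * inuse reaches active.
	 */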
2505  	new_inuse = iocg->inuse;
2506  	adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2507  	do {
2508  		new_inuse = new_inuse + adj_step;
2509  		propagate_weights(iocg, iocg->active, new_inuse, true, now);
2510  		current_hweight(iocg, NULL, &hwi);
2511  		cost = abs_cost_to_cost(abs_cost, hwi);
2512  	} while (time_after64(vtime + cost, now->vnow) &&
2513  		 iocg->inuse != iocg->active);
2514  
2515  	spin_unlock_irqrestore(&ioc->lock, flags);
2516  
2517  	TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2518  			old_inuse, iocg->inuse, old_hwi, hwi);
2519  
2520  	return cost;
2521  }
2522  
2523  static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2524  				    bool is_merge, u64 *costp)
2525  {
2526  	struct ioc *ioc = iocg->ioc;
2527  	u64 coef_seqio, coef_randio, coef_page;
2528  	u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2529  	u64 seek_pages = 0;
2530  	u64 cost = 0;
2531  
2532  	/* Can't calculate cost for empty bio */
2533  	if (!bio->bi_iter.bi_size)
2534  		goto out;
2535  
2536  	switch (bio_op(bio)) {
2537  	case REQ_OP_READ:
2538  		coef_seqio	= ioc->params.lcoefs[LCOEF_RSEQIO];
2539  		coef_randio	= ioc->params.lcoefs[LCOEF_RRANDIO];
2540  		coef_page	= ioc->params.lcoefs[LCOEF_RPAGE];
2541  		break;
2542  	case REQ_OP_WRITE:
2543  		coef_seqio	= ioc->params.lcoefs[LCOEF_WSEQIO];
2544  		coef_randio	= ioc->params.lcoefs[LCOEF_WRANDIO];
2545  		coef_page	= ioc->params.lcoefs[LCOEF_WPAGE];
2546  		break;
2547  	default:
2548  		goto out;
2549  	}
2550  
2551  	if (iocg->cursor) {
2552  		seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2553  		seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2554  	}
2555  
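	/*
	 * Sketch of the linear model with made-up coefficients: an IO seeking
	 * farther than LCOEF_RANDIO_PAGES from the cursor is charged
	 * coef_randio, otherwise coef_seqio, and every IO additionally pays
	 * pages * coef_page; e.g. a 16-page sequential read costs
	 * coef_seqio + 16 * coef_page.
	 */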
2556  	if (!is_merge) {
2557  		if (seek_pages > LCOEF_RANDIO_PAGES) {
2558  			cost += coef_randio;
2559  		} else {
2560  			cost += coef_seqio;
2561  		}
2562  	}
2563  	cost += pages * coef_page;
2564  out:
2565  	*costp = cost;
2566  }
2567  
2568  static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2569  {
2570  	u64 cost;
2571  
2572  	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2573  	return cost;
2574  }
2575  
2576  static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2577  					 u64 *costp)
2578  {
2579  	unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2580  
2581  	switch (req_op(rq)) {
2582  	case REQ_OP_READ:
2583  		*costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2584  		break;
2585  	case REQ_OP_WRITE:
2586  		*costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2587  		break;
2588  	default:
2589  		*costp = 0;
2590  	}
2591  }
2592  
2593  static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2594  {
2595  	u64 cost;
2596  
2597  	calc_size_vtime_cost_builtin(rq, ioc, &cost);
2598  	return cost;
2599  }
2600  
2601  static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2602  {
2603  	struct blkcg_gq *blkg = bio->bi_blkg;
2604  	struct ioc *ioc = rqos_to_ioc(rqos);
2605  	struct ioc_gq *iocg = blkg_to_iocg(blkg);
2606  	struct ioc_now now;
2607  	struct iocg_wait wait;
2608  	u64 abs_cost, cost, vtime;
2609  	bool use_debt, ioc_locked;
2610  	unsigned long flags;
2611  
2612  	/* bypass IOs if disabled, still initializing, or for root cgroup */
2613  	if (!ioc->enabled || !iocg || !iocg->level)
2614  		return;
2615  
2616  	/* calculate the absolute vtime cost */
2617  	abs_cost = calc_vtime_cost(bio, iocg, false);
2618  	if (!abs_cost)
2619  		return;
2620  
2621  	if (!iocg_activate(iocg, &now))
2622  		return;
2623  
2624  	iocg->cursor = bio_end_sector(bio);
2625  	vtime = atomic64_read(&iocg->vtime);
2626  	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2627  
2628  	/*
2629  	 * If no one's waiting and within budget, issue right away.  The
2630  	 * tests are racy but the races aren't systemic - we only miss once
2631  	 * in a while which is fine.
2632  	 */
2633  	if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2634  	    time_before_eq64(vtime + cost, now.vnow)) {
2635  		iocg_commit_bio(iocg, bio, abs_cost, cost);
2636  		return;
2637  	}
2638  
2639  	/*
2640  	 * We're over budget. This can be handled in two ways. IOs which may
2641  	 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2642  	 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2643  	 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2644  	 * whether debt handling is needed and acquire locks accordingly.
2645  	 */
2646  	use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2647  	ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2648  retry_lock:
2649  	iocg_lock(iocg, ioc_locked, &flags);
2650  
2651  	/*
2652  	 * @iocg must stay activated for debt and waitq handling. Deactivation
2653  	 * is synchronized against both ioc->lock and waitq.lock and we won't
2654  	 * get deactivated as long as we're waiting or have debt, so we're good
2655  	 * if we're activated here. In the unlikely cases that we aren't, just
2656  	 * issue the IO.
2657  	 */
2658  	if (unlikely(list_empty(&iocg->active_list))) {
2659  		iocg_unlock(iocg, ioc_locked, &flags);
2660  		iocg_commit_bio(iocg, bio, abs_cost, cost);
2661  		return;
2662  	}
2663  
2664  	/*
2665  	 * We're over budget. If @bio has to be issued regardless, remember
2666  	 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2667  	 * off the debt before waking more IOs.
2668  	 *
2669  	 * This way, the debt is continuously paid off each period with the
2670  	 * actual budget available to the cgroup. If we just wound vtime, we
2671  	 * would incorrectly use the current hw_inuse for the entire amount
2672  	 * which, for example, can lead to the cgroup staying blocked for a
2673  	 * long time even with substantially raised hw_inuse.
2674  	 *
2675  	 * An iocg with vdebt should stay online so that the timer can keep
2676  	 * deducting its vdebt and [de]activate use_delay mechanism
2677  	 * accordingly. We don't want to race against the timer trying to
2678  	 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2679  	 * penalizing the cgroup and its descendants.
2680  	 */
2681  	if (use_debt) {
2682  		iocg_incur_debt(iocg, abs_cost, &now);
2683  		if (iocg_kick_delay(iocg, &now))
2684  			blkcg_schedule_throttle(rqos->disk,
2685  					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2686  		iocg_unlock(iocg, ioc_locked, &flags);
2687  		return;
2688  	}
2689  
2690  	/* guarantee that iocgs w/ waiters have maximum inuse */
2691  	if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2692  		if (!ioc_locked) {
2693  			iocg_unlock(iocg, false, &flags);
2694  			ioc_locked = true;
2695  			goto retry_lock;
2696  		}
2697  		propagate_weights(iocg, iocg->active, iocg->active, true,
2698  				  &now);
2699  	}
2700  
2701  	/*
2702  	 * Append self to the waitq and schedule the wakeup timer if we're
2703  	 * the first waiter.  The timer duration is calculated based on the
2704  	 * current vrate.  vtime and hweight changes can make it too short
2705  	 * or too long.  Each wait entry records the absolute cost it's
2706  	 * waiting for to allow re-evaluation using a custom wait entry.
2707  	 *
2708  	 * If too short, the timer simply reschedules itself.  If too long,
2709  	 * the period timer will notice and trigger wakeups.
2710  	 *
2711  	 * All waiters are on iocg->waitq and the wait states are
2712  	 * synchronized using waitq.lock.
2713  	 */
2714  	init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2715  	wait.wait.private = current;
2716  	wait.bio = bio;
2717  	wait.abs_cost = abs_cost;
2718  	wait.committed = false;	/* will be set true by waker */
2719  
2720  	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2721  	iocg_kick_waitq(iocg, ioc_locked, &now);
2722  
2723  	iocg_unlock(iocg, ioc_locked, &flags);
2724  
2725  	while (true) {
2726  		set_current_state(TASK_UNINTERRUPTIBLE);
2727  		if (wait.committed)
2728  			break;
2729  		io_schedule();
2730  	}
2731  
2732  	/* waker already committed us, proceed */
2733  	finish_wait(&iocg->waitq, &wait.wait);
2734  }
2735  
2736  static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2737  			   struct bio *bio)
2738  {
2739  	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2740  	struct ioc *ioc = rqos_to_ioc(rqos);
2741  	sector_t bio_end = bio_end_sector(bio);
2742  	struct ioc_now now;
2743  	u64 vtime, abs_cost, cost;
2744  	unsigned long flags;
2745  
2746  	/* bypass if disabled, still initializing, or for root cgroup */
2747  	if (!ioc->enabled || !iocg || !iocg->level)
2748  		return;
2749  
2750  	abs_cost = calc_vtime_cost(bio, iocg, true);
2751  	if (!abs_cost)
2752  		return;
2753  
2754  	ioc_now(ioc, &now);
2755  
2756  	vtime = atomic64_read(&iocg->vtime);
2757  	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2758  
2759  	/* update cursor if backmerging into the request at the cursor */
2760  	if (blk_rq_pos(rq) < bio_end &&
2761  	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2762  		iocg->cursor = bio_end;
2763  
2764  	/*
2765  	 * Charge if there's enough vtime budget and the existing request has
2766  	 * cost assigned.
2767  	 */
2768  	if (rq->bio && rq->bio->bi_iocost_cost &&
2769  	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2770  		iocg_commit_bio(iocg, bio, abs_cost, cost);
2771  		return;
2772  	}
2773  
2774  	/*
2775  	 * Otherwise, account it as debt if @iocg is online, which it should
2776  	 * be for the vast majority of cases. See debt handling in
2777  	 * ioc_rqos_throttle() for details.
2778  	 */
2779  	spin_lock_irqsave(&ioc->lock, flags);
2780  	spin_lock(&iocg->waitq.lock);
2781  
2782  	if (likely(!list_empty(&iocg->active_list))) {
2783  		iocg_incur_debt(iocg, abs_cost, &now);
2784  		if (iocg_kick_delay(iocg, &now))
2785  			blkcg_schedule_throttle(rqos->disk,
2786  					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2787  	} else {
2788  		iocg_commit_bio(iocg, bio, abs_cost, cost);
2789  	}
2790  
2791  	spin_unlock(&iocg->waitq.lock);
2792  	spin_unlock_irqrestore(&ioc->lock, flags);
2793  }
2794  
2795  static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2796  {
2797  	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2798  
2799  	if (iocg && bio->bi_iocost_cost)
2800  		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2801  }
2802  
2803  static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2804  {
2805  	struct ioc *ioc = rqos_to_ioc(rqos);
2806  	struct ioc_pcpu_stat *ccs;
2807  	u64 on_q_ns, rq_wait_ns, size_nsec;
2808  	int pidx, rw;
2809  
2810  	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2811  		return;
2812  
2813  	switch (req_op(rq)) {
2814  	case REQ_OP_READ:
2815  		pidx = QOS_RLAT;
2816  		rw = READ;
2817  		break;
2818  	case REQ_OP_WRITE:
2819  		pidx = QOS_WLAT;
2820  		rw = WRITE;
2821  		break;
2822  	default:
2823  		return;
2824  	}
2825  
2826  	on_q_ns = blk_time_get_ns() - rq->alloc_time_ns;
2827  	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2828  	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2829  
2830  	ccs = get_cpu_ptr(ioc->pcpu_stat);
2831  
2832  	if (on_q_ns <= size_nsec ||
2833  	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2834  		local_inc(&ccs->missed[rw].nr_met);
2835  	else
2836  		local_inc(&ccs->missed[rw].nr_missed);
2837  
2838  	local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2839  
2840  	put_cpu_ptr(ccs);
2841  }
2842  
2843  static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2844  {
2845  	struct ioc *ioc = rqos_to_ioc(rqos);
2846  
2847  	spin_lock_irq(&ioc->lock);
2848  	ioc_refresh_params(ioc, false);
2849  	spin_unlock_irq(&ioc->lock);
2850  }
2851  
2852  static void ioc_rqos_exit(struct rq_qos *rqos)
2853  {
2854  	struct ioc *ioc = rqos_to_ioc(rqos);
2855  
2856  	blkcg_deactivate_policy(rqos->disk, &blkcg_policy_iocost);
2857  
2858  	spin_lock_irq(&ioc->lock);
2859  	ioc->running = IOC_STOP;
2860  	spin_unlock_irq(&ioc->lock);
2861  
2862  	timer_shutdown_sync(&ioc->timer);
2863  	free_percpu(ioc->pcpu_stat);
2864  	kfree(ioc);
2865  }
2866  
2867  static const struct rq_qos_ops ioc_rqos_ops = {
2868  	.throttle = ioc_rqos_throttle,
2869  	.merge = ioc_rqos_merge,
2870  	.done_bio = ioc_rqos_done_bio,
2871  	.done = ioc_rqos_done,
2872  	.queue_depth_changed = ioc_rqos_queue_depth_changed,
2873  	.exit = ioc_rqos_exit,
2874  };
2875  
2876  static int blk_iocost_init(struct gendisk *disk)
2877  {
2878  	struct ioc *ioc;
2879  	int i, cpu, ret;
2880  
2881  	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2882  	if (!ioc)
2883  		return -ENOMEM;
2884  
2885  	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2886  	if (!ioc->pcpu_stat) {
2887  		kfree(ioc);
2888  		return -ENOMEM;
2889  	}
2890  
2891  	for_each_possible_cpu(cpu) {
2892  		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2893  
2894  		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2895  			local_set(&ccs->missed[i].nr_met, 0);
2896  			local_set(&ccs->missed[i].nr_missed, 0);
2897  		}
2898  		local64_set(&ccs->rq_wait_ns, 0);
2899  	}
2900  
2901  	spin_lock_init(&ioc->lock);
2902  	timer_setup(&ioc->timer, ioc_timer_fn, 0);
2903  	INIT_LIST_HEAD(&ioc->active_iocgs);
2904  
2905  	ioc->running = IOC_IDLE;
2906  	ioc->vtime_base_rate = VTIME_PER_USEC;
2907  	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2908  	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2909  	ioc->period_at = ktime_to_us(blk_time_get());
2910  	atomic64_set(&ioc->cur_period, 0);
2911  	atomic_set(&ioc->hweight_gen, 0);
2912  
2913  	spin_lock_irq(&ioc->lock);
2914  	ioc->autop_idx = AUTOP_INVALID;
2915  	ioc_refresh_params_disk(ioc, true, disk);
2916  	spin_unlock_irq(&ioc->lock);
2917  
2918  	/*
2919  	 * rqos must be added before activation to allow ioc_pd_init() to
2920  	 * lookup the ioc from q. This means that the rqos methods may get
2921  	 * called before policy activation completes, so they can't assume that
2922  	 * the target bio has an iocg associated and need to test for NULL iocg.
2923  	 */
2924  	ret = rq_qos_add(&ioc->rqos, disk, RQ_QOS_COST, &ioc_rqos_ops);
2925  	if (ret)
2926  		goto err_free_ioc;
2927  
2928  	ret = blkcg_activate_policy(disk, &blkcg_policy_iocost);
2929  	if (ret)
2930  		goto err_del_qos;
2931  	return 0;
2932  
2933  err_del_qos:
2934  	rq_qos_del(&ioc->rqos);
2935  err_free_ioc:
2936  	free_percpu(ioc->pcpu_stat);
2937  	kfree(ioc);
2938  	return ret;
2939  }
2940  
2941  static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2942  {
2943  	struct ioc_cgrp *iocc;
2944  
2945  	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2946  	if (!iocc)
2947  		return NULL;
2948  
2949  	iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2950  	return &iocc->cpd;
2951  }
2952  
2953  static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2954  {
2955  	kfree(container_of(cpd, struct ioc_cgrp, cpd));
2956  }
2957  
2958  static struct blkg_policy_data *ioc_pd_alloc(struct gendisk *disk,
2959  		struct blkcg *blkcg, gfp_t gfp)
2960  {
2961  	int levels = blkcg->css.cgroup->level + 1;
2962  	struct ioc_gq *iocg;
2963  
2964  	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp,
2965  			    disk->node_id);
2966  	if (!iocg)
2967  		return NULL;
2968  
2969  	iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2970  	if (!iocg->pcpu_stat) {
2971  		kfree(iocg);
2972  		return NULL;
2973  	}
2974  
2975  	return &iocg->pd;
2976  }
2977  
2978  static void ioc_pd_init(struct blkg_policy_data *pd)
2979  {
2980  	struct ioc_gq *iocg = pd_to_iocg(pd);
2981  	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2982  	struct ioc *ioc = q_to_ioc(blkg->q);
2983  	struct ioc_now now;
2984  	struct blkcg_gq *tblkg;
2985  	unsigned long flags;
2986  
2987  	ioc_now(ioc, &now);
2988  
2989  	iocg->ioc = ioc;
2990  	atomic64_set(&iocg->vtime, now.vnow);
2991  	atomic64_set(&iocg->done_vtime, now.vnow);
2992  	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2993  	INIT_LIST_HEAD(&iocg->active_list);
2994  	INIT_LIST_HEAD(&iocg->walk_list);
2995  	INIT_LIST_HEAD(&iocg->surplus_list);
2996  	iocg->hweight_active = WEIGHT_ONE;
2997  	iocg->hweight_inuse = WEIGHT_ONE;
2998  
2999  	init_waitqueue_head(&iocg->waitq);
3000  	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
3001  	iocg->waitq_timer.function = iocg_waitq_timer_fn;
3002  
3003  	iocg->level = blkg->blkcg->css.cgroup->level;
3004  
3005  	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
3006  		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
3007  		iocg->ancestors[tiocg->level] = tiocg;
3008  	}
3009  
3010  	spin_lock_irqsave(&ioc->lock, flags);
3011  	weight_updated(iocg, &now);
3012  	spin_unlock_irqrestore(&ioc->lock, flags);
3013  }
3014  
3015  static void ioc_pd_free(struct blkg_policy_data *pd)
3016  {
3017  	struct ioc_gq *iocg = pd_to_iocg(pd);
3018  	struct ioc *ioc = iocg->ioc;
3019  	unsigned long flags;
3020  
3021  	if (ioc) {
3022  		spin_lock_irqsave(&ioc->lock, flags);
3023  
3024  		if (!list_empty(&iocg->active_list)) {
3025  			struct ioc_now now;
3026  
3027  			ioc_now(ioc, &now);
3028  			propagate_weights(iocg, 0, 0, false, &now);
3029  			list_del_init(&iocg->active_list);
3030  		}
3031  
3032  		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
3033  		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
3034  
3035  		spin_unlock_irqrestore(&ioc->lock, flags);
3036  
3037  		hrtimer_cancel(&iocg->waitq_timer);
3038  	}
3039  	free_percpu(iocg->pcpu_stat);
3040  	kfree(iocg);
3041  }
3042  
3043  static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3044  {
3045  	struct ioc_gq *iocg = pd_to_iocg(pd);
3046  	struct ioc *ioc = iocg->ioc;
3047  
3048  	if (!ioc->enabled)
3049  		return;
3050  
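	/*
	 * The vrate below is printed as a percentage with two decimals; e.g.
	 * (hypothetical) a vtime_base_rate of 1.5 * VTIME_PER_USEC yields
	 * vp10k = 15000 and is shown as "cost.vrate=150.00".
	 */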
3051  	if (iocg->level == 0) {
3052  		unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3053  			ioc->vtime_base_rate * 10000,
3054  			VTIME_PER_USEC);
3055  		seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
3056  	}
3057  
3058  	seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3059  
3060  	if (blkcg_debug_stats)
3061  		seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3062  			iocg->last_stat.wait_us,
3063  			iocg->last_stat.indebt_us,
3064  			iocg->last_stat.indelay_us);
3065  }
3066  
3067  static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3068  			     int off)
3069  {
3070  	const char *dname = blkg_dev_name(pd->blkg);
3071  	struct ioc_gq *iocg = pd_to_iocg(pd);
3072  
3073  	if (dname && iocg->cfg_weight)
3074  		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3075  	return 0;
3076  }
3077  
3079  static int ioc_weight_show(struct seq_file *sf, void *v)
3080  {
3081  	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3082  	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3083  
3084  	seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3085  	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3086  			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3087  	return 0;
3088  }
3089  
3090  static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3091  				size_t nbytes, loff_t off)
3092  {
3093  	struct blkcg *blkcg = css_to_blkcg(of_css(of));
3094  	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3095  	struct blkg_conf_ctx ctx;
3096  	struct ioc_now now;
3097  	struct ioc_gq *iocg;
3098  	u32 v;
3099  	int ret;
3100  
3101  	if (!strchr(buf, ':')) {
3102  		struct blkcg_gq *blkg;
3103  
3104  		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3105  			return -EINVAL;
3106  
3107  		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3108  			return -EINVAL;
3109  
3110  		spin_lock_irq(&blkcg->lock);
3111  		iocc->dfl_weight = v * WEIGHT_ONE;
3112  		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3113  			struct ioc_gq *iocg = blkg_to_iocg(blkg);
3114  
3115  			if (iocg) {
3116  				spin_lock(&iocg->ioc->lock);
3117  				ioc_now(iocg->ioc, &now);
3118  				weight_updated(iocg, &now);
3119  				spin_unlock(&iocg->ioc->lock);
3120  			}
3121  		}
3122  		spin_unlock_irq(&blkcg->lock);
3123  
3124  		return nbytes;
3125  	}
3126  
3127  	blkg_conf_init(&ctx, buf);
3128  
3129  	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, &ctx);
3130  	if (ret)
3131  		goto err;
3132  
3133  	iocg = blkg_to_iocg(ctx.blkg);
3134  
3135  	if (!strncmp(ctx.body, "default", 7)) {
3136  		v = 0;
3137  	} else {
3138  		if (!sscanf(ctx.body, "%u", &v))
3139  			goto einval;
3140  		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3141  			goto einval;
3142  	}
3143  
3144  	spin_lock(&iocg->ioc->lock);
3145  	iocg->cfg_weight = v * WEIGHT_ONE;
3146  	ioc_now(iocg->ioc, &now);
3147  	weight_updated(iocg, &now);
3148  	spin_unlock(&iocg->ioc->lock);
3149  
3150  	blkg_conf_exit(&ctx);
3151  	return nbytes;
3152  
3153  einval:
3154  	ret = -EINVAL;
3155  err:
3156  	blkg_conf_exit(&ctx);
3157  	return ret;
3158  }
3159  
3160  static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3161  			  int off)
3162  {
3163  	const char *dname = blkg_dev_name(pd->blkg);
3164  	struct ioc *ioc = pd_to_iocg(pd)->ioc;
3165  
3166  	if (!dname)
3167  		return 0;
3168  
3169  	spin_lock(&ioc->lock);
3170  	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3171  		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3172  		   ioc->params.qos[QOS_RPPM] / 10000,
3173  		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
3174  		   ioc->params.qos[QOS_RLAT],
3175  		   ioc->params.qos[QOS_WPPM] / 10000,
3176  		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
3177  		   ioc->params.qos[QOS_WLAT],
3178  		   ioc->params.qos[QOS_MIN] / 10000,
3179  		   ioc->params.qos[QOS_MIN] % 10000 / 100,
3180  		   ioc->params.qos[QOS_MAX] / 10000,
3181  		   ioc->params.qos[QOS_MAX] % 10000 / 100);
3182  	spin_unlock(&ioc->lock);
3183  	return 0;
3184  }
3185  
3186  static int ioc_qos_show(struct seq_file *sf, void *v)
3187  {
3188  	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3189  
3190  	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3191  			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3192  	return 0;
3193  }
3194  
3195  static const match_table_t qos_ctrl_tokens = {
3196  	{ QOS_ENABLE,		"enable=%u"	},
3197  	{ QOS_CTRL,		"ctrl=%s"	},
3198  	{ NR_QOS_CTRL_PARAMS,	NULL		},
3199  };
3200  
3201  static const match_table_t qos_tokens = {
3202  	{ QOS_RPPM,		"rpct=%s"	},
3203  	{ QOS_RLAT,		"rlat=%u"	},
3204  	{ QOS_WPPM,		"wpct=%s"	},
3205  	{ QOS_WLAT,		"wlat=%u"	},
3206  	{ QOS_MIN,		"min=%s"	},
3207  	{ QOS_MAX,		"max=%s"	},
3208  	{ NR_QOS_PARAMS,	NULL		},
3209  };
3210  
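/*
 * Parse a QoS configuration written to io.cost.qos (root cgroup only).
 * Example, with 8:0 as an arbitrary device and illustrative values:
 *
 *   echo "8:0 enable=1 ctrl=user rpct=95.00 rlat=10000 wpct=95.00 wlat=20000 min=50.00 max=150.00" > io.cost.qos
 *
 * rpct/wpct and min/max take two decimal places and are stored as
 * parts-per-million; rlat/wlat are latency thresholds in microseconds.
 * Explicitly setting any QoS parameter switches ctrl to "user".
 */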
3211  static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3212  			     size_t nbytes, loff_t off)
3213  {
3214  	struct blkg_conf_ctx ctx;
3215  	struct gendisk *disk;
3216  	struct ioc *ioc;
3217  	u32 qos[NR_QOS_PARAMS];
3218  	bool enable, user;
3219  	char *body, *p;
3220  	int ret;
3221  
3222  	blkg_conf_init(&ctx, input);
3223  
3224  	ret = blkg_conf_open_bdev(&ctx);
3225  	if (ret)
3226  		goto err;
3227  
3228  	body = ctx.body;
3229  	disk = ctx.bdev->bd_disk;
3230  	if (!queue_is_mq(disk->queue)) {
3231  		ret = -EOPNOTSUPP;
3232  		goto err;
3233  	}
3234  
3235  	ioc = q_to_ioc(disk->queue);
3236  	if (!ioc) {
3237  		ret = blk_iocost_init(disk);
3238  		if (ret)
3239  			goto err;
3240  		ioc = q_to_ioc(disk->queue);
3241  	}
3242  
3243  	blk_mq_freeze_queue(disk->queue);
3244  	blk_mq_quiesce_queue(disk->queue);
3245  
3246  	spin_lock_irq(&ioc->lock);
3247  	memcpy(qos, ioc->params.qos, sizeof(qos));
3248  	enable = ioc->enabled;
3249  	user = ioc->user_qos_params;
3250  
3251  	while ((p = strsep(&body, " \t\n"))) {
3252  		substring_t args[MAX_OPT_ARGS];
3253  		char buf[32];
3254  		int tok;
3255  		s64 v;
3256  
3257  		if (!*p)
3258  			continue;
3259  
3260  		switch (match_token(p, qos_ctrl_tokens, args)) {
3261  		case QOS_ENABLE:
3262  			if (match_u64(&args[0], &v))
3263  				goto einval;
3264  			enable = v;
3265  			continue;
3266  		case QOS_CTRL:
3267  			match_strlcpy(buf, &args[0], sizeof(buf));
3268  			if (!strcmp(buf, "auto"))
3269  				user = false;
3270  			else if (!strcmp(buf, "user"))
3271  				user = true;
3272  			else
3273  				goto einval;
3274  			continue;
3275  		}
3276  
3277  		tok = match_token(p, qos_tokens, args);
3278  		switch (tok) {
3279  		case QOS_RPPM:
3280  		case QOS_WPPM:
3281  			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3282  			    sizeof(buf))
3283  				goto einval;
3284  			if (cgroup_parse_float(buf, 2, &v))
3285  				goto einval;
3286  			if (v < 0 || v > 10000)
3287  				goto einval;
3288  			qos[tok] = v * 100;
3289  			break;
3290  		case QOS_RLAT:
3291  		case QOS_WLAT:
3292  			if (match_u64(&args[0], &v))
3293  				goto einval;
3294  			qos[tok] = v;
3295  			break;
3296  		case QOS_MIN:
3297  		case QOS_MAX:
3298  			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3299  			    sizeof(buf))
3300  				goto einval;
3301  			if (cgroup_parse_float(buf, 2, &v))
3302  				goto einval;
3303  			if (v < 0)
3304  				goto einval;
3305  			qos[tok] = clamp_t(s64, v * 100,
3306  					   VRATE_MIN_PPM, VRATE_MAX_PPM);
3307  			break;
3308  		default:
3309  			goto einval;
3310  		}
3311  		user = true;
3312  	}
3313  
3314  	if (qos[QOS_MIN] > qos[QOS_MAX])
3315  		goto einval;
3316  
3317  	if (enable && !ioc->enabled) {
3318  		blk_stat_enable_accounting(disk->queue);
3319  		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3320  		ioc->enabled = true;
3321  	} else if (!enable && ioc->enabled) {
3322  		blk_stat_disable_accounting(disk->queue);
3323  		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3324  		ioc->enabled = false;
3325  	}
3326  
3327  	if (user) {
3328  		memcpy(ioc->params.qos, qos, sizeof(qos));
3329  		ioc->user_qos_params = true;
3330  	} else {
3331  		ioc->user_qos_params = false;
3332  	}
3333  
3334  	ioc_refresh_params(ioc, true);
3335  	spin_unlock_irq(&ioc->lock);
3336  
3337  	if (enable)
3338  		wbt_disable_default(disk);
3339  	else
3340  		wbt_enable_default(disk);
3341  
3342  	blk_mq_unquiesce_queue(disk->queue);
3343  	blk_mq_unfreeze_queue(disk->queue);
3344  
3345  	blkg_conf_exit(&ctx);
3346  	return nbytes;
3347  einval:
3348  	spin_unlock_irq(&ioc->lock);
3349  
3350  	blk_mq_unquiesce_queue(disk->queue);
3351  	blk_mq_unfreeze_queue(disk->queue);
3352  
3353  	ret = -EINVAL;
3354  err:
3355  	blkg_conf_exit(&ctx);
3356  	return ret;
3357  }
3358  
3359  static u64 ioc_cost_model_prfill(struct seq_file *sf,
3360  				 struct blkg_policy_data *pd, int off)
3361  {
3362  	const char *dname = blkg_dev_name(pd->blkg);
3363  	struct ioc *ioc = pd_to_iocg(pd)->ioc;
3364  	u64 *u = ioc->params.i_lcoefs;
3365  
3366  	if (!dname)
3367  		return 0;
3368  
3369  	spin_lock(&ioc->lock);
3370  	seq_printf(sf, "%s ctrl=%s model=linear "
3371  		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
3372  		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3373  		   dname, ioc->user_cost_model ? "user" : "auto",
3374  		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3375  		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3376  	spin_unlock(&ioc->lock);
3377  	return 0;
3378  }
3379  
3380  static int ioc_cost_model_show(struct seq_file *sf, void *v)
3381  {
3382  	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3383  
3384  	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3385  			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3386  	return 0;
3387  }
3388  
3389  static const match_table_t cost_ctrl_tokens = {
3390  	{ COST_CTRL,		"ctrl=%s"	},
3391  	{ COST_MODEL,		"model=%s"	},
3392  	{ NR_COST_CTRL_PARAMS,	NULL		},
3393  };
3394  
3395  static const match_table_t i_lcoef_tokens = {
3396  	{ I_LCOEF_RBPS,		"rbps=%u"	},
3397  	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
3398  	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
3399  	{ I_LCOEF_WBPS,		"wbps=%u"	},
3400  	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
3401  	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
3402  	{ NR_I_LCOEFS,		NULL		},
3403  };
3404  
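/*
 * Parse a cost model written to io.cost.model (root cgroup only).
 * Example, with 8:0 as an arbitrary device and illustrative coefficients:
 *
 *   echo "8:0 ctrl=user model=linear rbps=2000000000 rseqiops=100000 rrandiops=50000 wbps=1500000000 wseqiops=90000 wrandiops=40000" > io.cost.model
 *
 * "linear" is the only available model.  Setting any coefficient switches
 * ctrl to "user"; "ctrl=auto" reverts to the builtin per-device-class
 * parameters.
 */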
3405  static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3406  				    size_t nbytes, loff_t off)
3407  {
3408  	struct blkg_conf_ctx ctx;
3409  	struct request_queue *q;
3410  	struct ioc *ioc;
3411  	u64 u[NR_I_LCOEFS];
3412  	bool user;
3413  	char *body, *p;
3414  	int ret;
3415  
3416  	blkg_conf_init(&ctx, input);
3417  
3418  	ret = blkg_conf_open_bdev(&ctx);
3419  	if (ret)
3420  		goto err;
3421  
3422  	body = ctx.body;
3423  	q = bdev_get_queue(ctx.bdev);
3424  	if (!queue_is_mq(q)) {
3425  		ret = -EOPNOTSUPP;
3426  		goto err;
3427  	}
3428  
3429  	ioc = q_to_ioc(q);
3430  	if (!ioc) {
3431  		ret = blk_iocost_init(ctx.bdev->bd_disk);
3432  		if (ret)
3433  			goto err;
3434  		ioc = q_to_ioc(q);
3435  	}
3436  
3437  	blk_mq_freeze_queue(q);
3438  	blk_mq_quiesce_queue(q);
3439  
3440  	spin_lock_irq(&ioc->lock);
3441  	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3442  	user = ioc->user_cost_model;
3443  
3444  	while ((p = strsep(&body, " \t\n"))) {
3445  		substring_t args[MAX_OPT_ARGS];
3446  		char buf[32];
3447  		int tok;
3448  		u64 v;
3449  
3450  		if (!*p)
3451  			continue;
3452  
3453  		switch (match_token(p, cost_ctrl_tokens, args)) {
3454  		case COST_CTRL:
3455  			match_strlcpy(buf, &args[0], sizeof(buf));
3456  			if (!strcmp(buf, "auto"))
3457  				user = false;
3458  			else if (!strcmp(buf, "user"))
3459  				user = true;
3460  			else
3461  				goto einval;
3462  			continue;
3463  		case COST_MODEL:
3464  			match_strlcpy(buf, &args[0], sizeof(buf));
3465  			if (strcmp(buf, "linear"))
3466  				goto einval;
3467  			continue;
3468  		}
3469  
3470  		tok = match_token(p, i_lcoef_tokens, args);
3471  		if (tok == NR_I_LCOEFS)
3472  			goto einval;
3473  		if (match_u64(&args[0], &v))
3474  			goto einval;
3475  		u[tok] = v;
3476  		user = true;
3477  	}
3478  
3479  	if (user) {
3480  		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3481  		ioc->user_cost_model = true;
3482  	} else {
3483  		ioc->user_cost_model = false;
3484  	}
3485  	ioc_refresh_params(ioc, true);
3486  	spin_unlock_irq(&ioc->lock);
3487  
3488  	blk_mq_unquiesce_queue(q);
3489  	blk_mq_unfreeze_queue(q);
3490  
3491  	blkg_conf_exit(&ctx);
3492  	return nbytes;
3493  
3494  einval:
3495  	spin_unlock_irq(&ioc->lock);
3496  
3497  	blk_mq_unquiesce_queue(q);
3498  	blk_mq_unfreeze_queue(q);
3499  
3500  	ret = -EINVAL;
3501  err:
3502  	blkg_conf_exit(&ctx);
3503  	return ret;
3504  }
3505  
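/*
 * cgroup2 interface files: "io.weight" exists on non-root cgroups while
 * "io.cost.qos" and "io.cost.model" are root-only knobs.
 */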
3506  static struct cftype ioc_files[] = {
3507  	{
3508  		.name = "weight",
3509  		.flags = CFTYPE_NOT_ON_ROOT,
3510  		.seq_show = ioc_weight_show,
3511  		.write = ioc_weight_write,
3512  	},
3513  	{
3514  		.name = "cost.qos",
3515  		.flags = CFTYPE_ONLY_ON_ROOT,
3516  		.seq_show = ioc_qos_show,
3517  		.write = ioc_qos_write,
3518  	},
3519  	{
3520  		.name = "cost.model",
3521  		.flags = CFTYPE_ONLY_ON_ROOT,
3522  		.seq_show = ioc_cost_model_show,
3523  		.write = ioc_cost_model_write,
3524  	},
3525  	{}
3526  };
3527  
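/*
 * cpd_*() manage the per-blkcg data holding the default weight; pd_*()
 * manage the per-(cgroup, device) ioc_gq's; ioc_pd_stat() feeds io.stat.
 */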
3528  static struct blkcg_policy blkcg_policy_iocost = {
3529  	.dfl_cftypes	= ioc_files,
3530  	.cpd_alloc_fn	= ioc_cpd_alloc,
3531  	.cpd_free_fn	= ioc_cpd_free,
3532  	.pd_alloc_fn	= ioc_pd_alloc,
3533  	.pd_init_fn	= ioc_pd_init,
3534  	.pd_free_fn	= ioc_pd_free,
3535  	.pd_stat_fn	= ioc_pd_stat,
3536  };
3537  
3538  static int __init ioc_init(void)
3539  {
3540  	return blkcg_policy_register(&blkcg_policy_iocost);
3541  }
3542  
3543  static void __exit ioc_exit(void)
3544  {
3545  	blkcg_policy_unregister(&blkcg_policy_iocost);
3546  }
3547  
3548  module_init(ioc_init);
3549  module_exit(ioc_exit);
3550