Lines matching refs: ioc (cross-reference listing over the Linux kernel's block/blk-iocost.c)
406 struct ioc { struct
464 struct ioc *ioc; argument
660 static struct ioc *rqos_to_ioc(struct rq_qos *rqos) in rqos_to_ioc()
662 return container_of(rqos, struct ioc, rqos); in rqos_to_ioc()
665 static struct ioc *q_to_ioc(struct request_queue *q) in q_to_ioc()
670 static const char __maybe_unused *ioc_name(struct ioc *ioc) in ioc_name() argument
672 struct gendisk *disk = ioc->rqos.disk; in ioc_name()
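rqos_to_ioc() and q_to_ioc() above recover the enclosing struct ioc from a pointer to an embedded member, the standard container_of() idiom. A minimal userspace sketch of the same pattern, with an illustrative struct layout rather than the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    struct rq_qos { int id; };                  /* stand-in for the embedded member */
    struct ioc   { long flags; struct rq_qos rqos; };

    /* same idea as the kernel macro: member pointer -> enclosing struct */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
    {
        return container_of(rqos, struct ioc, rqos);
    }

    int main(void)
    {
        struct ioc ioc = { .flags = 42 };
        struct rq_qos *rqos = &ioc.rqos;        /* as handed to rq_qos callbacks */

        printf("%ld\n", rqos_to_ioc(rqos)->flags);   /* prints 42 */
        return 0;
    }

The cast is safe because the rq_qos callbacks only ever hand back pointers that live inside a struct ioc; the wrapper simply restores the container.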
733 spin_lock_irqsave(&iocg->ioc->lock, *flags); in iocg_lock()
744 spin_unlock_irqrestore(&iocg->ioc->lock, *flags); in iocg_unlock()
753 static void ioc_refresh_margins(struct ioc *ioc) in ioc_refresh_margins() argument
755 struct ioc_margins *margins = &ioc->margins; in ioc_refresh_margins()
756 u32 period_us = ioc->period_us; in ioc_refresh_margins()
757 u64 vrate = ioc->vtime_base_rate; in ioc_refresh_margins()
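ioc_refresh_margins() (lines 753-757) derives its margins from period_us and vtime_base_rate: each margin is a fixed fraction of one period, converted into vtime units. A hedged arithmetic sketch; the percentages are illustrative stand-ins for the kernel's MARGIN_*_PCT constants:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative percentages, not necessarily the kernel's values */
    #define MARGIN_MIN_PCT    10
    #define MARGIN_LOW_PCT    20
    #define MARGIN_TARGET_PCT 50

    int main(void)
    {
        uint64_t period_us = 10000;     /* 10ms period */
        uint64_t vrate = 1000;          /* vtime units per usec (stand-in) */

        /* margins are fractions of a period, expressed in vtime units */
        int64_t min    = (int64_t)(period_us * MARGIN_MIN_PCT / 100) * vrate;
        int64_t low    = (int64_t)(period_us * MARGIN_LOW_PCT / 100) * vrate;
        int64_t target = (int64_t)(period_us * MARGIN_TARGET_PCT / 100) * vrate;

        printf("min=%lld low=%lld target=%lld\n",
               (long long)min, (long long)low, (long long)target);
        return 0;
    }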
765 static void ioc_refresh_period_us(struct ioc *ioc) in ioc_refresh_period_us() argument
769 lockdep_assert_held(&ioc->lock); in ioc_refresh_period_us()
772 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) { in ioc_refresh_period_us()
773 ppm = ioc->params.qos[QOS_RPPM]; in ioc_refresh_period_us()
774 lat = ioc->params.qos[QOS_RLAT]; in ioc_refresh_period_us()
776 ppm = ioc->params.qos[QOS_WPPM]; in ioc_refresh_period_us()
777 lat = ioc->params.qos[QOS_WLAT]; in ioc_refresh_period_us()
796 ioc->period_us = period_us; in ioc_refresh_period_us()
797 ioc->timer_slack_ns = div64_u64( in ioc_refresh_period_us()
800 ioc_refresh_margins(ioc); in ioc_refresh_period_us()
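ioc_refresh_period_us() (lines 772-800) follows whichever direction has the higher latency target, stretches the period to a multiple of that target, and derives the timer slack from the result. A sketch of that selection and the derived-slack arithmetic, with illustrative multiplier and clamp constants:

    #include <stdint.h>
    #include <stdio.h>

    #define MILLION        1000000u
    #define NSEC_PER_USEC  1000u
    /* illustrative bounds and slack percentage, not the kernel's exact values */
    #define MIN_PERIOD_US  1000u
    #define MAX_PERIOD_US  1000000u
    #define SLACK_PCT      5u

    int main(void)
    {
        /* QoS params: completion percentile (ppm) and latency target (us) */
        uint32_t rppm = 950000, rlat = 5000;    /* 95% of reads within 5ms */
        uint32_t wppm = 900000, wlat = 25000;   /* 90% of writes within 25ms */
        uint32_t ppm, lat, multi, period_us;

        /* follow the slower direction, mirroring lines 772-777 */
        if (rlat >= wlat) { ppm = rppm; lat = rlat; }
        else              { ppm = wppm; lat = wlat; }

        /* stretch the period across several latency targets;
           a looser percentile earns a longer stretch */
        multi = (MILLION - ppm) / 50000;
        if (multi < 2)
            multi = 2;
        period_us = multi * lat;
        if (period_us < MIN_PERIOD_US) period_us = MIN_PERIOD_US;
        if (period_us > MAX_PERIOD_US) period_us = MAX_PERIOD_US;

        printf("period_us=%u slack_ns=%llu\n", period_us,
               (unsigned long long)period_us * NSEC_PER_USEC * SLACK_PCT / 100);
        return 0;
    }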
807 static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk) in ioc_autop_idx() argument
809 int idx = ioc->autop_idx; in ioc_autop_idx()
827 if (ioc->user_qos_params || ioc->user_cost_model) in ioc_autop_idx()
831 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC); in ioc_autop_idx()
835 if (!ioc->autop_too_fast_at) in ioc_autop_idx()
836 ioc->autop_too_fast_at = now_ns; in ioc_autop_idx()
837 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC) in ioc_autop_idx()
840 ioc->autop_too_fast_at = 0; in ioc_autop_idx()
844 if (!ioc->autop_too_slow_at) in ioc_autop_idx()
845 ioc->autop_too_slow_at = now_ns; in ioc_autop_idx()
846 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC) in ioc_autop_idx()
849 ioc->autop_too_slow_at = 0; in ioc_autop_idx()
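ioc_autop_idx() (lines 827-849) debounces parameter switching: the first out-of-range vrate sample stamps autop_too_fast_at or autop_too_slow_at, the stamp is cleared as soon as vrate returns to range, and the index only moves after a full AUTOP_CYCLE_NSEC has elapsed out of range. A sketch of the "too fast" half, with an illustrative threshold:

    #include <stdint.h>
    #include <stdio.h>

    #define AUTOP_CYCLE_NS 10000000000ull   /* 10s debounce window */

    static uint64_t too_fast_at;    /* 0 == currently in range */

    /* return 1 once vrate has been pegged high for a full cycle */
    static int should_step_up(uint64_t vrate_pct, uint64_t now_ns)
    {
        if (vrate_pct >= 100) {             /* illustrative "too fast" bound */
            if (!too_fast_at)
                too_fast_at = now_ns;       /* start the clock */
            if (now_ns - too_fast_at >= AUTOP_CYCLE_NS)
                return 1;                   /* debounced: step up */
        } else {
            too_fast_at = 0;                /* back in range: reset */
        }
        return 0;
    }

    int main(void)
    {
        uint64_t t;

        /* samples every 3s; the step only fires at t=13s, 12s after t=1s */
        for (t = 1000000000ull; t <= 13000000000ull; t += 3000000000ull)
            printf("t=%llus step=%d\n",
                   (unsigned long long)(t / 1000000000ull),
                   should_step_up(120, t));
        return 0;
    }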
897 static void ioc_refresh_lcoefs(struct ioc *ioc) in ioc_refresh_lcoefs() argument
899 u64 *u = ioc->params.i_lcoefs; in ioc_refresh_lcoefs()
900 u64 *c = ioc->params.lcoefs; in ioc_refresh_lcoefs()
912 static bool ioc_refresh_params_disk(struct ioc *ioc, bool force, in ioc_refresh_params_disk() argument
918 lockdep_assert_held(&ioc->lock); in ioc_refresh_params_disk()
920 idx = ioc_autop_idx(ioc, disk); in ioc_refresh_params_disk()
923 if (idx == ioc->autop_idx && !force) in ioc_refresh_params_disk()
926 if (idx != ioc->autop_idx) { in ioc_refresh_params_disk()
927 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); in ioc_refresh_params_disk()
928 ioc->vtime_base_rate = VTIME_PER_USEC; in ioc_refresh_params_disk()
931 ioc->autop_idx = idx; in ioc_refresh_params_disk()
932 ioc->autop_too_fast_at = 0; in ioc_refresh_params_disk()
933 ioc->autop_too_slow_at = 0; in ioc_refresh_params_disk()
935 if (!ioc->user_qos_params) in ioc_refresh_params_disk()
936 memcpy(ioc->params.qos, p->qos, sizeof(p->qos)); in ioc_refresh_params_disk()
937 if (!ioc->user_cost_model) in ioc_refresh_params_disk()
938 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs)); in ioc_refresh_params_disk()
940 ioc_refresh_period_us(ioc); in ioc_refresh_params_disk()
941 ioc_refresh_lcoefs(ioc); in ioc_refresh_params_disk()
943 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] * in ioc_refresh_params_disk()
945 ioc->vrate_max = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MAX] * in ioc_refresh_params_disk()
951 static bool ioc_refresh_params(struct ioc *ioc, bool force) in ioc_refresh_params() argument
953 return ioc_refresh_params_disk(ioc, force, ioc->rqos.disk); in ioc_refresh_params()
963 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now) in ioc_refresh_vrate() argument
965 s64 pleft = ioc->period_at + ioc->period_us - now->now; in ioc_refresh_vrate()
966 s64 vperiod = ioc->period_us * ioc->vtime_base_rate; in ioc_refresh_vrate()
969 lockdep_assert_held(&ioc->lock); in ioc_refresh_vrate()
980 vcomp = -div64_s64(ioc->vtime_err, pleft); in ioc_refresh_vrate()
981 vcomp_min = -(ioc->vtime_base_rate >> 1); in ioc_refresh_vrate()
982 vcomp_max = ioc->vtime_base_rate; in ioc_refresh_vrate()
985 ioc->vtime_err += vcomp * pleft; in ioc_refresh_vrate()
987 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp); in ioc_refresh_vrate()
990 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod); in ioc_refresh_vrate()
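ioc_refresh_vrate() (lines 980-990) spreads the accumulated vtime_err over what is left of the period: the compensation is clamped to [-50%, +100%] of vtime_base_rate, the portion actually paid back (vcomp * pleft) is folded back into the error, and the residue is bounded to one period's worth of vtime. A worked sketch of that arithmetic with stand-in numbers:

    #include <stdint.h>
    #include <stdio.h>

    static int64_t clampll(int64_t v, int64_t lo, int64_t hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    int main(void)
    {
        int64_t base_rate = 1000;       /* vtime units per usec (stand-in) */
        int64_t period_us = 10000;
        int64_t pleft     = 4000;       /* usecs left in the current period */
        int64_t vtime_err = -9000000;   /* device fell 9M vtime units behind */

        /* aim to cancel the error across the rest of the period ... */
        int64_t vcomp = -(vtime_err / pleft);
        /* ... but never below -50% or above +100% of the base rate */
        vcomp = clampll(vcomp, -(base_rate >> 1), base_rate);
        /* account for the part we will actually pay back this period */
        vtime_err += vcomp * pleft;
        /* bound the leftover error to +-1 period's worth of vtime */
        vtime_err = clampll(vtime_err, -period_us * base_rate,
                            period_us * base_rate);

        printf("rate=%lld err=%lld\n",          /* rate=2000 err=-5000000 */
               (long long)(base_rate + vcomp), (long long)vtime_err);
        return 0;
    }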
993 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct, in ioc_adjust_base_vrate() argument
997 u64 vrate = ioc->vtime_base_rate; in ioc_adjust_base_vrate()
998 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max; in ioc_adjust_base_vrate()
1000 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) { in ioc_adjust_base_vrate()
1001 if (ioc->busy_level != prev_busy_level || nr_lagging) in ioc_adjust_base_vrate()
1002 trace_iocost_ioc_vrate_adj(ioc, vrate, in ioc_adjust_base_vrate()
1021 int idx = min_t(int, abs(ioc->busy_level), in ioc_adjust_base_vrate()
1025 if (ioc->busy_level > 0) in ioc_adjust_base_vrate()
1034 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct, in ioc_adjust_base_vrate()
1037 ioc->vtime_base_rate = vrate; in ioc_adjust_base_vrate()
1038 ioc_refresh_margins(ioc); in ioc_adjust_base_vrate()
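ioc_adjust_base_vrate() steps the base rate by a percentage indexed by min(|busy_level|, table size) (line 1021), downward when busy_level is positive (line 1025) and upward otherwise, then keeps it within [vrate_min, vrate_max]. A sketch with an illustrative adjustment table, not the kernel's actual values:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative per-level adjustment percentages */
    static const unsigned int adj_pct[] = { 0, 2, 4, 8, 16 };
    #define NR_ADJ ((int)(sizeof(adj_pct) / sizeof(adj_pct[0])))

    static uint64_t adjust_vrate(uint64_t vrate, int busy_level,
                                 uint64_t vmin, uint64_t vmax)
    {
        int lvl = busy_level < 0 ? -busy_level : busy_level;
        unsigned int pct = adj_pct[lvl < NR_ADJ ? lvl : NR_ADJ - 1];

        if (busy_level > 0)             /* device overloaded: slow vtime down */
            vrate -= vrate * pct / 100;
        else                            /* idle capacity: speed vtime up */
            vrate += vrate * pct / 100;

        if (vrate < vmin) vrate = vmin;
        if (vrate > vmax) vrate = vmax;
        return vrate;
    }

    int main(void)
    {
        /* busy_level 3 -> 8% step down: 1000 becomes 920 */
        printf("%llu\n", (unsigned long long)
               adjust_vrate(1000, 3, 250, 2000));
        return 0;
    }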
1042 static void ioc_now(struct ioc *ioc, struct ioc_now *now) in ioc_now() argument
1049 vrate = atomic64_read(&ioc->vtime_rate); in ioc_now()
1060 seq = read_seqcount_begin(&ioc->period_seqcount); in ioc_now()
1061 now->vnow = ioc->period_at_vtime + in ioc_now()
1062 (now->now - ioc->period_at) * vrate; in ioc_now()
1063 } while (read_seqcount_retry(&ioc->period_seqcount, seq)); in ioc_now()
1066 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now) in ioc_start_period() argument
1068 WARN_ON_ONCE(ioc->running != IOC_RUNNING); in ioc_start_period()
1070 write_seqcount_begin(&ioc->period_seqcount); in ioc_start_period()
1071 ioc->period_at = now->now; in ioc_start_period()
1072 ioc->period_at_vtime = now->vnow; in ioc_start_period()
1073 write_seqcount_end(&ioc->period_seqcount); in ioc_start_period()
1075 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us); in ioc_start_period()
1076 add_timer(&ioc->timer); in ioc_start_period()
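ioc_now() reads period_at and period_at_vtime under a seqcount retry loop (lines 1060-1063) while ioc_start_period() updates both inside a write section (lines 1070-1073), so a reader can never observe a torn pair. A single-writer userspace sketch of the protocol using C11 atomics; the kernel's seqcount_spinlock_t additionally pairs writes with ioc->lock, and a production seqlock fences the data accesses as well:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint seq;             /* odd while a write is in flight */
    static unsigned long period_at, period_at_vtime;

    static void start_period(unsigned long now, unsigned long vnow)
    {
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* -> odd */
        period_at = now;
        period_at_vtime = vnow;
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* -> even */
    }

    static void read_period(unsigned long *at, unsigned long *vat)
    {
        unsigned int s;

        do {
            while ((s = atomic_load_explicit(&seq,
                            memory_order_acquire)) & 1)
                ;                       /* writer active: wait */
            *at = period_at;
            *vat = period_at_vtime;
            /* retry if the sequence moved while we were reading */
        } while (atomic_load_explicit(&seq, memory_order_acquire) != s);
    }

    int main(void)
    {
        unsigned long at, vat;

        start_period(100, 100000);
        read_period(&at, &vat);
        printf("at=%lu vat=%lu\n", at, vat);    /* never a torn pair */
        return 0;
    }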
1087 struct ioc *ioc = iocg->ioc; in __propagate_weights() local
1090 lockdep_assert_held(&ioc->lock); in __propagate_weights()
1144 ioc->weights_updated = true; in __propagate_weights()
1147 static void commit_weights(struct ioc *ioc) in commit_weights() argument
1149 lockdep_assert_held(&ioc->lock); in commit_weights()
1151 if (ioc->weights_updated) { in commit_weights()
1154 atomic_inc(&ioc->hweight_gen); in commit_weights()
1155 ioc->weights_updated = false; in commit_weights()
1163 commit_weights(iocg->ioc); in propagate_weights()
1168 struct ioc *ioc = iocg->ioc; in current_hweight() local
1174 ioc_gen = atomic_read(&ioc->hweight_gen); in current_hweight()
1231 lockdep_assert_held(&iocg->ioc->lock); in current_hweight_max()
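Weight changes are committed lazily: commit_weights() just bumps ioc->hweight_gen (line 1154), and current_hweight() compares its cached generation against the global one (line 1174), recomputing only on mismatch. A minimal sketch of that generation-counter cache idiom, with a stand-in derivation:

    #include <stdio.h>

    static int global_gen = 1;          /* bumped whenever weights change */

    struct node {
        int weight;                     /* input */
        int cached_hweight;             /* derived, possibly stale */
        int cached_gen;                 /* generation the cache was built at */
    };

    static int current_hweight(struct node *n)
    {
        if (n->cached_gen != global_gen) {      /* stale? recompute once */
            n->cached_hweight = n->weight * 2;  /* stand-in derivation */
            n->cached_gen = global_gen;
        }
        return n->cached_hweight;
    }

    int main(void)
    {
        struct node n = { .weight = 100 };

        printf("%d\n", current_hweight(&n));    /* computes: 200 */
        n.weight = 300;
        global_gen++;                           /* commit_weights() analogue */
        printf("%d\n", current_hweight(&n));    /* recomputes: 600 */
        return 0;
    }

The payoff is that a weight update touching one cgroup costs a single atomic increment, while every other group pays for recomputation only when it next asks for its hierarchical weight.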
1248 struct ioc *ioc = iocg->ioc; in weight_updated() local
1253 lockdep_assert_held(&ioc->lock); in weight_updated()
1263 struct ioc *ioc = iocg->ioc; in iocg_activate() local
1273 ioc_now(ioc, now); in iocg_activate()
1274 cur_period = atomic64_read(&ioc->cur_period); in iocg_activate()
1284 spin_lock_irq(&ioc->lock); in iocg_activate()
1286 ioc_now(ioc, now); in iocg_activate()
1289 cur_period = atomic64_read(&ioc->cur_period); in iocg_activate()
1307 vtarget = now->vnow - ioc->margins.target; in iocg_activate()
1319 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1; in iocg_activate()
1320 list_add(&iocg->active_list, &ioc->active_iocgs); in iocg_activate()
1330 if (ioc->running == IOC_IDLE) { in iocg_activate()
1331 ioc->running = IOC_RUNNING; in iocg_activate()
1332 ioc->dfgv_period_at = now->now; in iocg_activate()
1333 ioc->dfgv_period_rem = 0; in iocg_activate()
1334 ioc_start_period(ioc, now); in iocg_activate()
1338 spin_unlock_irq(&ioc->lock); in iocg_activate()
1342 spin_unlock_irq(&ioc->lock); in iocg_activate()
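iocg_activate() keeps the per-bio hot path lock-free: it samples cur_period without the lock (line 1274), and only if the group looks inactive takes ioc->lock and re-reads it (lines 1284-1289) before touching the active list. A sketch of that check, lock, recheck pattern; the kernel performs the unlocked read with matching care:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_long cur_period;
    static long active_period = -1;     /* written under lock; the fast-path
                                           read is deliberately unlocked */

    static void activate(void)
    {
        /* lockless fast path: nothing to do if already active this period */
        if (atomic_load(&cur_period) == active_period)
            return;

        pthread_mutex_lock(&lock);
        /* recheck under the lock; another CPU may have activated us */
        if (atomic_load(&cur_period) != active_period) {
            active_period = atomic_load(&cur_period);
            /* ... join active list, seed vtime, maybe start the period ... */
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        atomic_store(&cur_period, 7);
        activate();                     /* slow path: takes the lock once */
        activate();                     /* fast path: lock-free no-op */
        printf("active_period=%ld\n", active_period);
        return 0;
    }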
1348 struct ioc *ioc = iocg->ioc; in iocg_kick_delay() local
1376 ioc->period_us * ioc->vtime_base_rate); in iocg_kick_delay()
1416 lockdep_assert_held(&iocg->ioc->lock); in iocg_incur_debt()
1439 lockdep_assert_held(&iocg->ioc->lock); in iocg_pay_debt()
1496 struct ioc *ioc = iocg->ioc; in iocg_kick_waitq() local
1513 lockdep_assert_held(&ioc->lock); in iocg_kick_waitq()
1562 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) * in iocg_kick_waitq()
1564 expires += ioc->timer_slack_ns; in iocg_kick_waitq()
1569 abs(oexpires - expires) <= ioc->timer_slack_ns) in iocg_kick_waitq()
1573 ioc->timer_slack_ns, HRTIMER_MODE_ABS); in iocg_kick_waitq()
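iocg_kick_waitq() avoids pointless hrtimer churn: if the timer is already armed within timer_slack_ns of the wanted expiry (line 1569) it leaves it alone, otherwise it re-arms with that slack (line 1573). A sketch of the debounce; armed_at and maybe_rearm() are hypothetical names:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t armed_at;           /* 0 == not armed */

    /* re-arm only when the new expiry differs enough to matter */
    static int maybe_rearm(uint64_t expires, uint64_t slack_ns)
    {
        uint64_t oexpires = armed_at;
        uint64_t delta = oexpires > expires ? oexpires - expires
                                            : expires - oexpires;

        if (oexpires && delta <= slack_ns)
            return 0;                   /* close enough: keep current timer */
        armed_at = expires;
        return 1;                       /* re-armed */
    }

    int main(void)
    {
        printf("%d", maybe_rearm(1000000, 50000));   /* 1: first arm */
        printf("%d", maybe_rearm(1020000, 50000));   /* 0: within slack */
        printf("%d\n", maybe_rearm(2000000, 50000)); /* 1: re-armed */
        return 0;
    }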
1583 ioc_now(iocg->ioc, &now); in iocg_waitq_timer_fn()
1592 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p) in ioc_lat_stat() argument
1600 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu); in ioc_lat_stat()
1628 ioc->period_us * NSEC_PER_USEC); in ioc_lat_stat()
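ioc_lat_stat() (lines 1592-1628) walks every CPU's ioc_pcpu_stat, sums the windowed totals and misses, and reports the miss ratio in parts-per-million. A sketch of the aggregation for one direction:

    #include <stdint.h>
    #include <stdio.h>

    #define MILLION 1000000ull
    #define NR_CPUS 4

    struct pcpu_stat { uint64_t total, missed; };

    int main(void)
    {
        struct pcpu_stat stat[NR_CPUS] = {
            { 1000, 10 }, { 2000, 15 }, { 500, 0 }, { 1500, 25 },
        };
        uint64_t total = 0, missed = 0;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {   /* per_cpu_ptr() analogue */
            total  += stat[cpu].total;
            missed += stat[cpu].missed;
        }
        /* fraction of IOs that blew the latency target, in ppm:
           50 of 5000 -> 10000 ppm, i.e. 1% */
        printf("missed_ppm=%llu\n",
               total ? (unsigned long long)(missed * MILLION / total) : 0);
        return 0;
    }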
1634 struct ioc *ioc = iocg->ioc; in iocg_is_idle() local
1638 atomic64_read(&ioc->cur_period)) in iocg_is_idle()
1698 struct ioc *ioc = iocg->ioc; in iocg_flush_stat_leaf() local
1703 lockdep_assert_held(&iocg->ioc->lock); in iocg_flush_stat_leaf()
1713 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate); in iocg_flush_stat_leaf()
1746 struct ioc *ioc = iocg->ioc; in hweight_after_donation() local
1756 time_after64(vtime, now->vnow - ioc->margins.min)) in hweight_after_donation()
1760 excess = now->vnow - vtime - ioc->margins.target; in hweight_after_donation()
1765 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE); in hweight_after_donation()
1785 now->vnow - ioc->period_at_vtime); in hweight_after_donation()
2075 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, in ioc_forgive_debts() argument
2083 ioc->dfgv_period_at = now->now; in ioc_forgive_debts()
2084 ioc->dfgv_period_rem = 0; in ioc_forgive_debts()
2085 ioc->dfgv_usage_us_sum = 0; in ioc_forgive_debts()
2095 if (ioc->busy_level > 0) in ioc_forgive_debts()
2096 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us); in ioc_forgive_debts()
2098 ioc->dfgv_usage_us_sum += usage_us_sum; in ioc_forgive_debts()
2099 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD)) in ioc_forgive_debts()
2106 dur = now->now - ioc->dfgv_period_at; in ioc_forgive_debts()
2107 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur); in ioc_forgive_debts()
2109 ioc->dfgv_period_at = now->now; in ioc_forgive_debts()
2110 ioc->dfgv_usage_us_sum = 0; in ioc_forgive_debts()
2114 ioc->dfgv_period_rem = 0; in ioc_forgive_debts()
2127 nr_cycles = dur + ioc->dfgv_period_rem; in ioc_forgive_debts()
2128 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD); in ioc_forgive_debts()
2130 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_forgive_debts()
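ioc_forgive_debts() converts elapsed wall time into whole forgiveness cycles and banks the sub-period remainder in dfgv_period_rem (lines 2127-2128), mirroring do_div()'s quotient/remainder split so no time is lost between timer runs. A sketch with an illustrative cycle length:

    #include <stdint.h>
    #include <stdio.h>

    #define DFGV_PERIOD 250000ull       /* illustrative cycle length, usecs */

    static uint64_t period_rem;         /* leftover carried between calls */

    /* whole forgiveness cycles covered by dur, banking the remainder */
    static uint64_t cycles_elapsed(uint64_t dur)
    {
        uint64_t n = dur + period_rem;  /* include the banked leftover */

        period_rem = n % DFGV_PERIOD;   /* do_div() hands back this remainder */
        return n / DFGV_PERIOD;
    }

    int main(void)
    {
        /* three timer ticks of 150ms each: 450ms == 1 cycle + 200ms banked */
        printf("%llu ", (unsigned long long)cycles_elapsed(150000));  /* 0 */
        printf("%llu ", (unsigned long long)cycles_elapsed(150000));  /* 1 */
        printf("%llu\n", (unsigned long long)cycles_elapsed(150000)); /* 0 */
        printf("rem=%llu\n", (unsigned long long)period_rem);   /* 200000 */
        return 0;
    }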
2168 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now) in ioc_check_iocgs() argument
2173 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { in ioc_check_iocgs()
2213 excess = now->vnow - vtime - ioc->margins.target; in ioc_check_iocgs()
2218 ioc->vtime_err -= div64_u64(excess * old_hwi, in ioc_check_iocgs()
2224 atomic64_read(&ioc->cur_period), vtime); in ioc_check_iocgs()
2232 commit_weights(ioc); in ioc_check_iocgs()
2238 struct ioc *ioc = container_of(timer, struct ioc, timer); in ioc_timer_fn() local
2251 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct); in ioc_timer_fn()
2254 spin_lock_irq(&ioc->lock); in ioc_timer_fn()
2256 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM]; in ioc_timer_fn()
2257 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM]; in ioc_timer_fn()
2258 ioc_now(ioc, &now); in ioc_timer_fn()
2260 period_vtime = now.vnow - ioc->period_at_vtime; in ioc_timer_fn()
2262 spin_unlock_irq(&ioc->lock); in ioc_timer_fn()
2266 nr_debtors = ioc_check_iocgs(ioc, &now); in ioc_timer_fn()
2272 iocg_flush_stat(&ioc->active_iocgs, &now); in ioc_timer_fn()
2275 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_timer_fn()
2312 time_before64(vtime, now.vnow - ioc->margins.low))) { in ioc_timer_fn()
2319 ioc->vtime_base_rate); in ioc_timer_fn()
2325 if (time_after64(iocg->activated_at, ioc->period_at)) in ioc_timer_fn()
2328 usage_dur = max_t(u64, now.now - ioc->period_at, 1); in ioc_timer_fn()
2382 commit_weights(ioc); in ioc_timer_fn()
2394 prev_busy_level = ioc->busy_level; in ioc_timer_fn()
2399 ioc->busy_level = max(ioc->busy_level, 0); in ioc_timer_fn()
2400 ioc->busy_level++; in ioc_timer_fn()
2410 ioc->busy_level = min(ioc->busy_level, 0); in ioc_timer_fn()
2417 ioc->busy_level--; in ioc_timer_fn()
2425 ioc->busy_level = 0; in ioc_timer_fn()
2429 ioc->busy_level = 0; in ioc_timer_fn()
2432 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000); in ioc_timer_fn()
2434 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages, in ioc_timer_fn()
2437 ioc_refresh_params(ioc, false); in ioc_timer_fn()
2439 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now); in ioc_timer_fn()
2445 atomic64_inc(&ioc->cur_period); in ioc_timer_fn()
2447 if (ioc->running != IOC_STOP) { in ioc_timer_fn()
2448 if (!list_empty(&ioc->active_iocgs)) { in ioc_timer_fn()
2449 ioc_start_period(ioc, &now); in ioc_timer_fn()
2451 ioc->busy_level = 0; in ioc_timer_fn()
2452 ioc->vtime_err = 0; in ioc_timer_fn()
2453 ioc->running = IOC_IDLE; in ioc_timer_fn()
2456 ioc_refresh_vrate(ioc, &now); in ioc_timer_fn()
2459 spin_unlock_irq(&ioc->lock); in ioc_timer_fn()
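Inside ioc_timer_fn(), busy_level is a signed hysteresis counter: a shortage period first flushes any negative history with max(busy_level, 0) and then increments (lines 2399-2400), a surplus period does the mirror image (lines 2410, 2417), anything else resets to 0, and the result is clamped to +-1000 (line 2432). A sketch of one period's bookkeeping, with the shortage and surplus conditions simplified to flags:

    #include <stdio.h>

    static int busy_level;

    static int clampi(int v, int lo, int hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    static void update_busy_level(int shortage, int surplus)
    {
        if (shortage) {
            busy_level = busy_level < 0 ? 0 : busy_level;   /* max(.., 0) */
            busy_level++;
        } else if (surplus) {
            busy_level = busy_level > 0 ? 0 : busy_level;   /* min(.., 0) */
            busy_level--;
        } else {
            busy_level = 0;             /* neither: reset */
        }
        busy_level = clampi(busy_level, -1000, 1000);
    }

    int main(void)
    {
        update_busy_level(1, 0);
        update_busy_level(1, 0);
        printf("%d ", busy_level);      /* 2: two consecutive busy periods */
        update_busy_level(0, 1);
        printf("%d\n", busy_level);     /* -1: direction flip resets first */
        return 0;
    }

The flush-on-flip keeps a long stretch in one direction from masking a genuine change in the other, while the magnitude still encodes how long the current condition has persisted.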
2465 struct ioc *ioc = iocg->ioc; in adjust_inuse_and_calc_cost() local
2466 struct ioc_margins *margins = &ioc->margins; in adjust_inuse_and_calc_cost()
2490 spin_lock_irqsave(&ioc->lock, flags); in adjust_inuse_and_calc_cost()
2494 spin_unlock_irqrestore(&ioc->lock, flags); in adjust_inuse_and_calc_cost()
2515 spin_unlock_irqrestore(&ioc->lock, flags); in adjust_inuse_and_calc_cost()
2526 struct ioc *ioc = iocg->ioc; in calc_vtime_cost_builtin() local
2538 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO]; in calc_vtime_cost_builtin()
2539 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO]; in calc_vtime_cost_builtin()
2540 coef_page = ioc->params.lcoefs[LCOEF_RPAGE]; in calc_vtime_cost_builtin()
2543 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO]; in calc_vtime_cost_builtin()
2544 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO]; in calc_vtime_cost_builtin()
2545 coef_page = ioc->params.lcoefs[LCOEF_WPAGE]; in calc_vtime_cost_builtin()
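calc_vtime_cost_builtin() selects read or write coefficients (lines 2538-2545) and charges a flat sequential-or-random term plus a per-page term; scaling by the cgroup's hierarchical share happens later in adjust_inuse_and_calc_cost(). A sketch of the per-IO evaluation with illustrative coefficients:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative vtime coefficients; real ones come from ioc->params.lcoefs */
    #define COEF_SEQIO   50000ull       /* flat charge for a sequential IO */
    #define COEF_RANDIO 200000ull       /* flat charge for a random IO */
    #define COEF_PAGE     1000ull       /* charge per 4k page transferred */

    static uint64_t vtime_cost(uint64_t bytes, int is_seq)
    {
        uint64_t pages = bytes >> 12;   /* 4k pages */
        uint64_t cost = is_seq ? COEF_SEQIO : COEF_RANDIO;

        return cost + pages * COEF_PAGE;
    }

    int main(void)
    {
        printf("seq 64k: %llu\n", (unsigned long long)vtime_cost(65536, 1));
        printf("rand 4k: %llu\n", (unsigned long long)vtime_cost(4096, 0));
        return 0;
    }

With these stand-in numbers a random 4k IO (201000) costs roughly three sequential 64k IOs (66000 each), which is the shape of trade-off the linear model is meant to capture.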
2576 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc, in calc_size_vtime_cost_builtin() argument
2583 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE]; in calc_size_vtime_cost_builtin()
2586 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE]; in calc_size_vtime_cost_builtin()
2593 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc) in calc_size_vtime_cost() argument
2597 calc_size_vtime_cost_builtin(rq, ioc, &cost); in calc_size_vtime_cost()
2604 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_throttle() local
2613 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_throttle()
2740 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_merge() local
2747 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_merge()
2754 ioc_now(ioc, &now); in ioc_rqos_merge()
2779 spin_lock_irqsave(&ioc->lock, flags); in ioc_rqos_merge()
2792 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_rqos_merge()
2805 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_done() local
2810 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns) in ioc_rqos_done()
2828 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC); in ioc_rqos_done()
2830 ccs = get_cpu_ptr(ioc->pcpu_stat); in ioc_rqos_done()
2833 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC) in ioc_rqos_done()
2845 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_queue_depth_changed() local
2847 spin_lock_irq(&ioc->lock); in ioc_rqos_queue_depth_changed()
2848 ioc_refresh_params(ioc, false); in ioc_rqos_queue_depth_changed()
2849 spin_unlock_irq(&ioc->lock); in ioc_rqos_queue_depth_changed()
2854 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_exit() local
2858 spin_lock_irq(&ioc->lock); in ioc_rqos_exit()
2859 ioc->running = IOC_STOP; in ioc_rqos_exit()
2860 spin_unlock_irq(&ioc->lock); in ioc_rqos_exit()
2862 timer_shutdown_sync(&ioc->timer); in ioc_rqos_exit()
2863 free_percpu(ioc->pcpu_stat); in ioc_rqos_exit()
2864 kfree(ioc); in ioc_rqos_exit()
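ioc_rqos_exit() (lines 2858-2864) orders teardown carefully: flag IOC_STOP under the lock, then timer_shutdown_sync() so the callback can neither run nor re-arm, and only then free the percpu stats and the ioc itself. A userspace sketch of the same ordering, with a worker thread standing in for the timer (it busy-waits for brevity):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ioc {
        pthread_mutex_t lock;
        int running;                    /* IOC_RUNNING / IOC_STOP analogue */
        pthread_t timer;                /* stands in for the kernel timer */
    };

    static void *timer_fn(void *arg)
    {
        struct ioc *ioc = arg;

        for (;;) {
            pthread_mutex_lock(&ioc->lock);
            int stop = !ioc->running;
            pthread_mutex_unlock(&ioc->lock);
            if (stop)
                return NULL;            /* observed STOP: don't re-arm */
            /* ... periodic work ... */
        }
    }

    int main(void)
    {
        struct ioc *ioc = calloc(1, sizeof(*ioc));

        if (!ioc)
            return 1;
        pthread_mutex_init(&ioc->lock, NULL);
        ioc->running = 1;
        pthread_create(&ioc->timer, NULL, timer_fn, ioc);

        /* exit path: flag stop under the lock ... */
        pthread_mutex_lock(&ioc->lock);
        ioc->running = 0;
        pthread_mutex_unlock(&ioc->lock);
        /* ... then wait until the callback is truly done, as
           timer_shutdown_sync() does, before freeing anything */
        pthread_join(ioc->timer, NULL);
        free(ioc);
        puts("clean exit");
        return 0;
    }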
2878 struct ioc *ioc; in blk_iocost_init() local
2881 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); in blk_iocost_init()
2882 if (!ioc) in blk_iocost_init()
2885 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat); in blk_iocost_init()
2886 if (!ioc->pcpu_stat) { in blk_iocost_init()
2887 kfree(ioc); in blk_iocost_init()
2892 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu); in blk_iocost_init()
2901 spin_lock_init(&ioc->lock); in blk_iocost_init()
2902 timer_setup(&ioc->timer, ioc_timer_fn, 0); in blk_iocost_init()
2903 INIT_LIST_HEAD(&ioc->active_iocgs); in blk_iocost_init()
2905 ioc->running = IOC_IDLE; in blk_iocost_init()
2906 ioc->vtime_base_rate = VTIME_PER_USEC; in blk_iocost_init()
2907 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); in blk_iocost_init()
2908 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock); in blk_iocost_init()
2909 ioc->period_at = ktime_to_us(blk_time_get()); in blk_iocost_init()
2910 atomic64_set(&ioc->cur_period, 0); in blk_iocost_init()
2911 atomic_set(&ioc->hweight_gen, 0); in blk_iocost_init()
2913 spin_lock_irq(&ioc->lock); in blk_iocost_init()
2914 ioc->autop_idx = AUTOP_INVALID; in blk_iocost_init()
2915 ioc_refresh_params_disk(ioc, true, disk); in blk_iocost_init()
2916 spin_unlock_irq(&ioc->lock); in blk_iocost_init()
2924 ret = rq_qos_add(&ioc->rqos, disk, RQ_QOS_COST, &ioc_rqos_ops); in blk_iocost_init()
2934 rq_qos_del(&ioc->rqos); in blk_iocost_init()
2936 free_percpu(ioc->pcpu_stat); in blk_iocost_init()
2937 kfree(ioc); in blk_iocost_init()
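blk_iocost_init() (lines 2881-2937) allocates in stages (the ioc, then the percpu stats, then rq_qos registration) and every later failure unwinds the earlier stages in reverse. A sketch of that staged-unwind idiom; fail_at is a hypothetical knob added for demonstration:

    #include <stdio.h>
    #include <stdlib.h>

    struct ioc { int *pcpu_stat; };

    static int iocost_init(struct ioc **out, int fail_at)
    {
        struct ioc *ioc = calloc(1, sizeof(*ioc));      /* stage 1 */

        if (!ioc || fail_at == 1)
            goto err_ioc;
        ioc->pcpu_stat = calloc(16, sizeof(int));       /* stage 2 */
        if (!ioc->pcpu_stat || fail_at == 2)
            goto err_stat;
        /* stage 3 (registration) would unwind both stages on failure */
        *out = ioc;
        return 0;

    err_stat:
        free(ioc->pcpu_stat);          /* undo stage 2 */
    err_ioc:
        free(ioc);                     /* undo stage 1; free(NULL) is a no-op */
        return -1;
    }

    int main(void)
    {
        struct ioc *ioc;

        printf("init: %s\n", iocost_init(&ioc, 0) ? "failed" : "ok");
        printf("init: %s\n", iocost_init(&ioc, 2) ? "failed" : "ok");
        return 0;
    }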
2982 struct ioc *ioc = q_to_ioc(blkg->q); in ioc_pd_init() local
2987 ioc_now(ioc, &now); in ioc_pd_init()
2989 iocg->ioc = ioc; in ioc_pd_init()
2992 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); in ioc_pd_init()
3010 spin_lock_irqsave(&ioc->lock, flags); in ioc_pd_init()
3012 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_pd_init()
3018 struct ioc *ioc = iocg->ioc; in ioc_pd_free() local
3021 if (ioc) { in ioc_pd_free()
3022 spin_lock_irqsave(&ioc->lock, flags); in ioc_pd_free()
3027 ioc_now(ioc, &now); in ioc_pd_free()
3035 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_pd_free()
3046 struct ioc *ioc = iocg->ioc; in ioc_pd_stat() local
3048 if (!ioc->enabled) in ioc_pd_stat()
3053 ioc->vtime_base_rate * 10000, in ioc_pd_stat()
3116 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3117 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3119 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3144 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3146 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3148 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3164 struct ioc *ioc = pd_to_iocg(pd)->ioc; in ioc_qos_prfill() local
3169 spin_lock(&ioc->lock); in ioc_qos_prfill()
3171 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto", in ioc_qos_prfill()
3172 ioc->params.qos[QOS_RPPM] / 10000, in ioc_qos_prfill()
3173 ioc->params.qos[QOS_RPPM] % 10000 / 100, in ioc_qos_prfill()
3174 ioc->params.qos[QOS_RLAT], in ioc_qos_prfill()
3175 ioc->params.qos[QOS_WPPM] / 10000, in ioc_qos_prfill()
3176 ioc->params.qos[QOS_WPPM] % 10000 / 100, in ioc_qos_prfill()
3177 ioc->params.qos[QOS_WLAT], in ioc_qos_prfill()
3178 ioc->params.qos[QOS_MIN] / 10000, in ioc_qos_prfill()
3179 ioc->params.qos[QOS_MIN] % 10000 / 100, in ioc_qos_prfill()
3180 ioc->params.qos[QOS_MAX] / 10000, in ioc_qos_prfill()
3181 ioc->params.qos[QOS_MAX] % 10000 / 100); in ioc_qos_prfill()
3182 spin_unlock(&ioc->lock); in ioc_qos_prfill()
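ioc_qos_prfill() prints ppm values as percentages with two decimals: v / 10000 yields the whole percent and v % 10000 / 100 the hundredths (lines 3172-3181). A worked example:

    #include <stdio.h>

    int main(void)
    {
        unsigned int ppm = 953400;  /* 95.34% in parts-per-million */

        /* 953400 / 10000 = 95, 953400 % 10000 / 100 = 34 */
        printf("%u.%02u%%\n", ppm / 10000, ppm % 10000 / 100);
        return 0;
    }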
3216 struct ioc *ioc; in ioc_qos_write() local
3235 ioc = q_to_ioc(disk->queue); in ioc_qos_write()
3236 if (!ioc) { in ioc_qos_write()
3240 ioc = q_to_ioc(disk->queue); in ioc_qos_write()
3246 spin_lock_irq(&ioc->lock); in ioc_qos_write()
3247 memcpy(qos, ioc->params.qos, sizeof(qos)); in ioc_qos_write()
3248 enable = ioc->enabled; in ioc_qos_write()
3249 user = ioc->user_qos_params; in ioc_qos_write()
3317 if (enable && !ioc->enabled) { in ioc_qos_write()
3320 ioc->enabled = true; in ioc_qos_write()
3321 } else if (!enable && ioc->enabled) { in ioc_qos_write()
3324 ioc->enabled = false; in ioc_qos_write()
3328 memcpy(ioc->params.qos, qos, sizeof(qos)); in ioc_qos_write()
3329 ioc->user_qos_params = true; in ioc_qos_write()
3331 ioc->user_qos_params = false; in ioc_qos_write()
3334 ioc_refresh_params(ioc, true); in ioc_qos_write()
3335 spin_unlock_irq(&ioc->lock); in ioc_qos_write()
3348 spin_unlock_irq(&ioc->lock); in ioc_qos_write()
3363 struct ioc *ioc = pd_to_iocg(pd)->ioc; in ioc_cost_model_prfill() local
3364 u64 *u = ioc->params.i_lcoefs; in ioc_cost_model_prfill()
3369 spin_lock(&ioc->lock); in ioc_cost_model_prfill()
3373 dname, ioc->user_cost_model ? "user" : "auto", in ioc_cost_model_prfill()
3376 spin_unlock(&ioc->lock); in ioc_cost_model_prfill()
3410 struct ioc *ioc; in ioc_cost_model_write() local
3429 ioc = q_to_ioc(q); in ioc_cost_model_write()
3430 if (!ioc) { in ioc_cost_model_write()
3434 ioc = q_to_ioc(q); in ioc_cost_model_write()
3440 spin_lock_irq(&ioc->lock); in ioc_cost_model_write()
3441 memcpy(u, ioc->params.i_lcoefs, sizeof(u)); in ioc_cost_model_write()
3442 user = ioc->user_cost_model; in ioc_cost_model_write()
3480 memcpy(ioc->params.i_lcoefs, u, sizeof(u)); in ioc_cost_model_write()
3481 ioc->user_cost_model = true; in ioc_cost_model_write()
3483 ioc->user_cost_model = false; in ioc_cost_model_write()
3485 ioc_refresh_params(ioc, true); in ioc_cost_model_write()
3486 spin_unlock_irq(&ioc->lock); in ioc_cost_model_write()
3495 spin_unlock_irq(&ioc->lock); in ioc_cost_model_write()