Lines matching refs: cpu_map

377 static void perf_domain_debug(const struct cpumask *cpu_map,  in perf_domain_debug()  argument
383 printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); in perf_domain_debug()
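
The two perf_domain_debug() lines above show the standard cpumask printing idiom. A minimal sketch of that idiom (helper name is hypothetical; %*pbl and cpumask_pr_args() are the real kernel API):

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	/* %*pbl consumes the (nr_cpu_ids, bits) pair that
	 * cpumask_pr_args() expands to and renders a ranged
	 * bitmap list such as "0-3,6-7". */
	static void print_span_sketch(const struct cpumask *cpu_map)
	{
		printk(KERN_DEBUG "span: %*pbl\n", cpumask_pr_args(cpu_map));
	}
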
425 static bool build_perf_domains(const struct cpumask *cpu_map) in build_perf_domains() argument
429 int cpu = cpumask_first(cpu_map); in build_perf_domains()
435 if (!sched_is_eas_possible(cpu_map)) in build_perf_domains()
438 for_each_cpu(i, cpu_map) { in build_perf_domains()
451 perf_domain_debug(cpu_map, pd); in build_perf_domains()
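
The build_perf_domains() lines above follow a common shape: take a representative CPU from the span, then visit every CPU in it exactly once. An illustrative sketch of that walk, not the real energy-model lookup (covered() is a hypothetical predicate for "already inside a built domain"):

	#include <linux/cpumask.h>

	static bool build_walk_sketch(const struct cpumask *cpu_map,
				      bool (*covered)(int))
	{
		int cpu = cpumask_first(cpu_map);
		int i;

		if (cpu >= nr_cpu_ids)
			return false;	/* empty map */

		for_each_cpu(i, cpu_map) {
			if (covered(i))
				continue;
			/* ... create the domain that CPU i belongs to ... */
		}
		return true;
	}
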
1353 const struct cpumask *cpu_map) in asym_cpu_capacity_classify() argument
1367 else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry))) in asym_cpu_capacity_classify()
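
asym_cpu_capacity_classify() leans on mask-overlap tests. A hedged sketch of the distinction it draws: a capacity entry that intersects cpu_map but not the domain span is what makes the asymmetry only "partial" for that domain.

	#include <linux/cpumask.h>

	/* cpumask_intersects() - the spans share at least one CPU;
	 * cpumask_subset()     - the first lies wholly inside the second. */
	static bool entry_visible(const struct cpumask *entry_span,
				  const struct cpumask *cpu_map)
	{
		return cpumask_intersects(entry_span, cpu_map);
	}
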
1491 static void __sdt_free(const struct cpumask *cpu_map);
1492 static int __sdt_alloc(const struct cpumask *cpu_map);
1495 const struct cpumask *cpu_map) in __free_domain_allocs() argument
1506 __sdt_free(cpu_map); in __free_domain_allocs()
1514 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) in __visit_domain_allocation_hell() argument
1518 if (__sdt_alloc(cpu_map)) in __visit_domain_allocation_hell()
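
__free_domain_allocs() and __visit_domain_allocation_hell() use a staged-unwind idiom: an enum records how far allocation got, and a fall-through switch frees that stage plus everything below it. A sketch of the pattern (enum names match the real s_alloc states; the bodies are elided):

	#include <linux/compiler.h>

	enum s_alloc_sketch { sa_rootdomain, sa_sd, sa_sd_storage, sa_none };

	static void unwind_sketch(enum s_alloc_sketch what)
	{
		switch (what) {
		case sa_rootdomain:
			/* drop the root domain */
			fallthrough;
		case sa_sd:
			/* free the per-CPU sched_domain pointers */
			fallthrough;
		case sa_sd_storage:
			/* __sdt_free(cpu_map) in the real code */
			fallthrough;
		case sa_none:
			break;
		}
	}
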
1589 const struct cpumask *cpu_map, in sd_init() argument
1644 cpumask_and(sd_span, cpu_map, tl->mask(cpu)); in sd_init()
1647 sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); in sd_init()
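
The sd_init() lines above compute a domain's span by clipping the topology level's per-CPU mask to cpu_map, so CPUs that are offline or isolated never end up inside a scheduling domain. A minimal sketch (tl_mask stands in for tl->mask(cpu)):

	#include <linux/cpumask.h>

	static void clip_span_sketch(struct cpumask *sd_span,
				     const struct cpumask *tl_mask,
				     const struct cpumask *cpu_map)
	{
		cpumask_and(sd_span, cpu_map, tl_mask);
	}
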
2222 static int __sdt_alloc(const struct cpumask *cpu_map) in __sdt_alloc() argument
2246 for_each_cpu(j, cpu_map) { in __sdt_alloc()
2291 static void __sdt_free(const struct cpumask *cpu_map) in __sdt_free() argument
2299 for_each_cpu(j, cpu_map) { in __sdt_free()
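
__sdt_alloc() and __sdt_free() mirror each other: both walk every CPU in cpu_map, one allocating per-CPU backing data on that CPU's NUMA node, the other freeing it, so a failed allocation can be unwound by the same walk. A hedged sketch of the allocation side (object size and layout are illustrative):

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/topology.h>

	static int per_cpu_alloc_sketch(void **objs,
					const struct cpumask *cpu_map)
	{
		int j;

		for_each_cpu(j, cpu_map) {
			/* allocate near the CPU that will use the data */
			objs[j] = kzalloc_node(64, GFP_KERNEL, cpu_to_node(j));
			if (!objs[j])
				return -ENOMEM;	/* caller re-walks the map to free */
		}
		return 0;
	}
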
2328 const struct cpumask *cpu_map, struct sched_domain_attr *attr, in build_sched_domain() argument
2331 struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); in build_sched_domain()
2362 const struct cpumask *cpu_map, int cpu) in topology_span_sane() argument
2376 for_each_cpu_from(i, cpu_map) { in topology_span_sane()
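
topology_span_sane() checks each unordered CPU pair once: for_each_cpu_from() continues the scan from the CPU after "cpu", and two CPUs at the same topology level must report masks that are either identical or fully disjoint. A sketch of that pairwise walk (level_mask() is a stand-in for tl->mask()):

	#include <linux/cpumask.h>

	static bool span_sane_sketch(const struct cpumask *cpu_map, int cpu,
				     const struct cpumask *(*level_mask)(int))
	{
		int i = cpu + 1;

		for_each_cpu_from(i, cpu_map) {
			if (!cpumask_equal(level_mask(cpu), level_mask(i)) &&
			    cpumask_intersects(level_mask(cpu), level_mask(i)))
				return false;	/* overlapping but unequal */
		}
		return true;
	}
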
2396 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) in build_sched_domains() argument
2406 if (WARN_ON(cpumask_empty(cpu_map))) in build_sched_domains()
2409 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); in build_sched_domains()
2414 for_each_cpu(i, cpu_map) { in build_sched_domains()
2420 if (WARN_ON(!topology_span_sane(tl, cpu_map, i))) in build_sched_domains()
2423 sd = build_sched_domain(tl, cpu_map, attr, sd, i); in build_sched_domains()
2431 if (cpumask_equal(cpu_map, sched_domain_span(sd))) in build_sched_domains()
2437 for_each_cpu(i, cpu_map) { in build_sched_domains()
2454 for_each_cpu(i, cpu_map) { in build_sched_domains()
2510 if (!cpumask_test_cpu(i, cpu_map)) in build_sched_domains()
2521 for_each_cpu(i, cpu_map) { in build_sched_domains()
2539 pr_info("root domain span: %*pbl\n", cpumask_pr_args(cpu_map)); in build_sched_domains()
2543 __free_domain_allocs(&d, alloc_state, cpu_map); in build_sched_domains()
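
The build_sched_domains() lines above trace the core loop: for each CPU in the map, build one sched_domain per topology level bottom-up, stopping early once a level's span already covers the whole map. A greatly reduced skeleton of that shape; allocation, group construction, overlap handling and error paths are all elided, and build_sched_domain()/for_each_sd_topology() are the file-local helpers the listing shows, so this is a shape sketch rather than standalone code:

	#include <linux/sched/topology.h>

	static void build_loop_sketch(const struct cpumask *cpu_map,
				      struct sched_domain_attr *attr)
	{
		struct sched_domain_topology_level *tl;
		int i;

		for_each_cpu(i, cpu_map) {
			struct sched_domain *sd = NULL;

			for_each_sd_topology(tl) {
				sd = build_sched_domain(tl, cpu_map, attr, sd, i);
				if (cpumask_equal(cpu_map, sched_domain_span(sd)))
					break;	/* top level reached early */
			}
		}
	}
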
2603 int __init sched_init_domains(const struct cpumask *cpu_map) in sched_init_domains() argument
2617 cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN)); in sched_init_domains()
2627 static void detach_destroy_domains(const struct cpumask *cpu_map) in detach_destroy_domains() argument
2629 unsigned int cpu = cpumask_any(cpu_map); in detach_destroy_domains()
2639 for_each_cpu(i, cpu_map) in detach_destroy_domains()
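
detach_destroy_domains() picks any CPU as representative of the shared root domain, then detaches every CPU in the map individually. A sketch of that teardown walk (detach_one() is a hypothetical stand-in for the real cpu_attach_domain(NULL, &def_root_domain, i) call):

	#include <linux/cpumask.h>

	static void teardown_sketch(const struct cpumask *cpu_map,
				    void (*detach_one)(int))
	{
		unsigned int any = cpumask_any(cpu_map);
		int i;

		(void)any;	/* the real code uses it to inspect the root domain */
		for_each_cpu(i, cpu_map)
			detach_one(i);
	}
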