Lines matching refs: cpu (arch/powerpc/kernel/smp.c)
277 void smp_muxed_ipi_set_message(int cpu, int msg) in smp_muxed_ipi_set_message() argument
279 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_set_message()
289 void smp_muxed_ipi_message_pass(int cpu, int msg) in smp_muxed_ipi_message_pass() argument
291 smp_muxed_ipi_set_message(cpu, msg); in smp_muxed_ipi_message_pass()
297 smp_ops->cause_ipi(cpu); in smp_muxed_ipi_message_pass()
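
The muxed-IPI lines above fold several message types into one per-CPU word: smp_muxed_ipi_set_message() atomically ORs in a bit for the message, then smp_muxed_ipi_message_pass() fires a single hardware IPI via smp_ops->cause_ipi(). A minimal userspace sketch of that set-then-drain idea, using C11 atomics (ipi_word, set_message, and handle_ipi are illustrative names, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

enum { MSG_CALL_FUNCTION, MSG_RESCHEDULE, MSG_TICK_BROADCAST, MSG_NMI, MSG_MAX };

static _Atomic unsigned long ipi_word;      /* stand-in for one CPU's message word */

static void set_message(int msg)            /* sender: like smp_muxed_ipi_set_message() */
{
	atomic_fetch_or_explicit(&ipi_word, 1UL << msg, memory_order_release);
	/* a real sender would now call smp_ops->cause_ipi(cpu) */
}

static void handle_ipi(void)                /* receiver: drain once, dispatch each bit */
{
	unsigned long all = atomic_exchange_explicit(&ipi_word, 0, memory_order_acquire);

	for (int msg = 0; msg < MSG_MAX; msg++)
		if (all & (1UL << msg))
			printf("handling message %d\n", msg);
}

int main(void)
{
	set_message(MSG_RESCHEDULE);
	set_message(MSG_CALL_FUNCTION);
	handle_ipi();                       /* prints messages 0 and 1 */
	return 0;
}

The receiver's atomic exchange ensures each posted message bit is consumed exactly once even if new senders race with the drain.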
351 static inline void do_message_pass(int cpu, int msg) in do_message_pass() argument
354 smp_ops->message_pass(cpu, msg); in do_message_pass()
357 smp_muxed_ipi_message_pass(cpu, msg); in do_message_pass()
361 void arch_smp_send_reschedule(int cpu) in arch_smp_send_reschedule() argument
364 do_message_pass(cpu, PPC_MSG_RESCHEDULE); in arch_smp_send_reschedule()
368 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
370 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_single_ipi()
375 unsigned int cpu; in arch_send_call_function_ipi_mask() local
377 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
378 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_ipi_mask()
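
do_message_pass() at lines 351-357 is a two-way dispatch: a platform that supplies its own smp_ops->message_pass hook gets it called directly, everything else falls back to the muxed path, and the arch_send_* helpers are thin wrappers over it. A trimmed stand-in (struct smp_ops_t here is a hypothetical reduction of the kernel's):

#include <stddef.h>
#include <stdio.h>

struct smp_ops_t {                          /* trimmed stand-in for the kernel's */
	void (*message_pass)(int cpu, int msg);
};

static void muxed_pass(int cpu, int msg)
{
	printf("muxed IPI to cpu %d, msg %d\n", cpu, msg);
}

static void do_message_pass(const struct smp_ops_t *ops, int cpu, int msg)
{
	if (ops->message_pass)
		ops->message_pass(cpu, msg);    /* platform-specific hook */
	else
		muxed_pass(cpu, msg);           /* generic muxed fallback */
}

int main(void)
{
	struct smp_ops_t ops = { NULL };

	do_message_pass(&ops, 1, 0);            /* takes the muxed fallback */
	return 0;
}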
469 static void do_smp_send_nmi_ipi(int cpu, bool safe) in do_smp_send_nmi_ipi() argument
471 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu)) in do_smp_send_nmi_ipi()
474 if (cpu >= 0) { in do_smp_send_nmi_ipi()
475 do_message_pass(cpu, PPC_MSG_NMI_IPI); in do_smp_send_nmi_ipi()
493 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), in __smp_send_nmi_ipi() argument
500 BUG_ON(cpu == me); in __smp_send_nmi_ipi()
501 BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS); in __smp_send_nmi_ipi()
517 if (cpu < 0) { in __smp_send_nmi_ipi()
522 cpumask_set_cpu(cpu, &nmi_ipi_pending_mask); in __smp_send_nmi_ipi()
529 do_smp_send_nmi_ipi(cpu, safe); in __smp_send_nmi_ipi()
558 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) in smp_send_nmi_ipi() argument
560 return __smp_send_nmi_ipi(cpu, fn, delay_us, false); in smp_send_nmi_ipi()
563 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) in smp_send_safe_nmi_ipi() argument
565 return __smp_send_nmi_ipi(cpu, fn, delay_us, true); in smp_send_safe_nmi_ipi()
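
__smp_send_nmi_ipi() records its targets in nmi_ipi_pending_mask, sends the NMI, and (in the surrounding source) spins until every target checks in or delay_us expires. A userspace sketch of that bounded-poll pattern, assuming a single 64-bit pending word in place of the cpumask:

#define _POSIX_C_SOURCE 200809L
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static _Atomic uint64_t nmi_pending;        /* one bit per targeted CPU */

static bool wait_for_acks(uint64_t delay_us)
{
	struct timespec ts = { 0, 1000 };   /* poll every 1us */

	while (atomic_load(&nmi_pending) != 0) {
		if (delay_us-- == 0)
			return false;       /* some CPU never checked in */
		nanosleep(&ts, NULL);
	}
	return true;
}

int main(void)
{
	atomic_store(&nmi_pending, 1ULL << 3);   /* target CPU 3 */
	atomic_store(&nmi_pending, 0);           /* pretend its handler acked */
	return wait_for_acks(100) ? 0 : 1;
}

The "safe" variant at line 563 merely forces the fallback message path instead of smp_ops->cause_nmi_ipi(), as line 471 shows.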
572 unsigned int cpu; in tick_broadcast() local
574 for_each_cpu(cpu, mask) in tick_broadcast()
575 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST); in tick_broadcast()
594 int cpu; in crash_send_ipi() local
598 for_each_present_cpu(cpu) { in crash_send_ipi()
599 if (cpu_online(cpu)) in crash_send_ipi()
610 do_smp_send_nmi_ipi(cpu, false); in crash_send_ipi()
866 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg) in get_cpu_thread_group_start() argument
868 int hw_cpu_id = get_hard_smp_processor_id(cpu); in get_cpu_thread_group_start()
885 static struct thread_groups *__init get_thread_groups(int cpu, in get_thread_groups() argument
889 struct device_node *dn = of_get_cpu_node(cpu, NULL); in get_thread_groups()
890 struct thread_groups_list *cpu_tgl = &tgl[cpu]; in get_thread_groups()
921 int cpu, int cpu_group_start) in update_mask_from_threadgroup() argument
923 int first_thread = cpu_first_thread_sibling(cpu); in update_mask_from_threadgroup()
926 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu)); in update_mask_from_threadgroup()
943 static int __init init_thread_group_cache_map(int cpu, int cache_property) in init_thread_group_cache_map() argument
954 tg = get_thread_groups(cpu, cache_property, &err); in init_thread_group_cache_map()
959 cpu_group_start = get_cpu_thread_group_start(cpu, tg); in init_thread_group_cache_map()
967 mask = &per_cpu(thread_group_l1_cache_map, cpu); in init_thread_group_cache_map()
968 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
971 mask = &per_cpu(thread_group_l2_cache_map, cpu); in init_thread_group_cache_map()
972 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
973 mask = &per_cpu(thread_group_l3_cache_map, cpu); in init_thread_group_cache_map()
974 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
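
init_thread_group_cache_map() locates the CPU inside the flattened ibm,thread-groups list via get_cpu_thread_group_start(), then marks every hardware thread in the same group in the matching per-cpu L1/L2/L3 map. A sketch of that index computation, assuming the layout of nr_groups consecutive groups of threads_per_group hw thread ids (group_start_of is an illustrative name):

#include <stdio.h>

struct thread_groups {
	int nr_groups;
	int threads_per_group;
	const int *thread_list;             /* flattened: group 0, group 1, ... */
};

static int group_start_of(int hw_cpu_id, const struct thread_groups *tg)
{
	for (int i = 0; i < tg->nr_groups; i++) {
		int start = i * tg->threads_per_group;

		for (int j = 0; j < tg->threads_per_group; j++)
			if (tg->thread_list[start + j] == hw_cpu_id)
				return start;   /* cpu_group_start in the source */
	}
	return -1;                          /* hw thread not in any group */
}

int main(void)
{
	const int list[] = { 0, 1, 2, 3,  4, 5, 6, 7 }; /* two groups of four */
	struct thread_groups tg = { 2, 4, list };

	printf("%d\n", group_start_of(6, &tg));         /* prints 4 */
	return 0;
}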
1030 static const struct cpumask *shared_cache_mask(int cpu) in shared_cache_mask() argument
1032 return per_cpu(cpu_l2_cache_map, cpu); in shared_cache_mask()
1036 static const struct cpumask *smallcore_smt_mask(int cpu) in smallcore_smt_mask() argument
1038 return cpu_smallcore_mask(cpu); in smallcore_smt_mask()
1042 static struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
1044 return per_cpu(cpu_coregroup_map, cpu); in cpu_coregroup_mask()
1056 static const struct cpumask *cpu_mc_mask(int cpu) in cpu_mc_mask() argument
1058 return cpu_coregroup_mask(cpu); in cpu_mc_mask()
1063 int cpu; in init_big_cores() local
1065 for_each_possible_cpu(cpu) { in init_big_cores()
1066 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1); in init_big_cores()
1071 zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu), in init_big_cores()
1073 cpu_to_node(cpu)); in init_big_cores()
1078 for_each_possible_cpu(cpu) { in init_big_cores()
1079 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3); in init_big_cores()
1094 unsigned int cpu, num_threads; in smp_prepare_cpus() local
1108 for_each_possible_cpu(cpu) { in smp_prepare_cpus()
1109 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), in smp_prepare_cpus()
1110 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1111 zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu), in smp_prepare_cpus()
1112 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1113 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), in smp_prepare_cpus()
1114 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1116 zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu), in smp_prepare_cpus()
1117 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1123 if (cpu_present(cpu)) { in smp_prepare_cpus()
1124 set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]); in smp_prepare_cpus()
1125 set_cpu_numa_mem(cpu, in smp_prepare_cpus()
1126 local_memory_node(numa_cpu_lookup_table[cpu])); in smp_prepare_cpus()
1183 unsigned int cpu = smp_processor_id(); in generic_cpu_disable() local
1185 if (cpu == boot_cpuid) in generic_cpu_disable()
1188 set_cpu_online(cpu, false); in generic_cpu_disable()
1210 void generic_cpu_die(unsigned int cpu) in generic_cpu_die() argument
1216 if (is_cpu_dead(cpu)) in generic_cpu_die()
1220 printk(KERN_ERR "CPU%d didn't die...\n", cpu); in generic_cpu_die()
1223 void generic_set_cpu_dead(unsigned int cpu) in generic_set_cpu_dead() argument
1225 per_cpu(cpu_state, cpu) = CPU_DEAD; in generic_set_cpu_dead()
1233 void generic_set_cpu_up(unsigned int cpu) in generic_set_cpu_up() argument
1235 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in generic_set_cpu_up()
1238 int generic_check_cpu_restart(unsigned int cpu) in generic_check_cpu_restart() argument
1240 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; in generic_check_cpu_restart()
1243 int is_cpu_dead(unsigned int cpu) in is_cpu_dead() argument
1245 return per_cpu(cpu_state, cpu) == CPU_DEAD; in is_cpu_dead()
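
generic_set_cpu_dead(), is_cpu_dead(), and generic_cpu_die() form a small handshake over the per-cpu cpu_state variable: the dying CPU publishes CPU_DEAD and the survivor polls for it, complaining if it never arrives. A userspace sketch with one shared state word and a thread standing in for the dying CPU:

#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

enum cpu_state { CPU_UP_PREPARE, CPU_DEAD };

static _Atomic int cpu_state = CPU_UP_PREPARE;

static void *dying_cpu(void *arg)
{
	(void)arg;
	atomic_store(&cpu_state, CPU_DEAD);       /* generic_set_cpu_dead() */
	return NULL;
}

int main(void)
{
	struct timespec ts = { 0, 100000 };       /* 100us between polls */
	pthread_t t;

	pthread_create(&t, NULL, dying_cpu, NULL);
	for (int i = 0; i < 100; i++) {           /* generic_cpu_die()'s poll */
		if (atomic_load(&cpu_state) == CPU_DEAD) {
			puts("CPU is dead");
			pthread_join(t, NULL);
			return 0;
		}
		nanosleep(&ts, NULL);
	}
	puts("CPU didn't die...");                /* the printk at line 1220 */
	pthread_join(t, NULL);
	return 1;
}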
1259 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) in cpu_idle_thread_init() argument
1262 paca_ptrs[cpu]->__current = idle; in cpu_idle_thread_init()
1263 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) + in cpu_idle_thread_init()
1266 task_thread_info(idle)->cpu = cpu; in cpu_idle_thread_init()
1267 secondary_current = current_set[cpu] = idle; in cpu_idle_thread_init()
1270 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
1283 cpu_thread_in_subcore(cpu)) in __cpu_up()
1287 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) in __cpu_up()
1290 cpu_idle_thread_init(cpu, tidle); in __cpu_up()
1297 rc = smp_ops->prepare_cpu(cpu); in __cpu_up()
1305 cpu_callin_map[cpu] = 0; in __cpu_up()
1314 DBG("smp: kicking cpu %d\n", cpu); in __cpu_up()
1315 rc = smp_ops->kick_cpu(cpu); in __cpu_up()
1317 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); in __cpu_up()
1329 spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline)); in __cpu_up()
1331 if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) { in __cpu_up()
1336 while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline)) in __cpu_up()
1340 if (!cpu_callin_map[cpu]) { in __cpu_up()
1341 printk(KERN_ERR "Processor %u is stuck.\n", cpu); in __cpu_up()
1345 DBG("Processor %u found.\n", cpu); in __cpu_up()
1351 spin_until_cond(cpu_online(cpu)); in __cpu_up()
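
__cpu_up() clears cpu_callin_map[cpu], kicks the secondary, then spins until the flag flips or a jiffies deadline passes, printing "Processor %u is stuck." on timeout. The same wait shape in userspace, with CLOCK_MONOTONIC standing in for jiffies:

#define _POSIX_C_SOURCE 200809L
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static _Atomic int callin;                  /* cpu_callin_map[cpu] stand-in */

static bool wait_for_callin(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (atomic_load(&callin))
			return true;        /* secondary reached start_secondary() */
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000
				+ (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms > timeout_ms)
			return false;       /* "Processor %u is stuck." */
	}
}

int main(void)
{
	atomic_store(&callin, 1);           /* pretend the kicked CPU checked in */
	return wait_for_callin(5000) ? 0 : 1;
}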
1359 int cpu_to_core_id(int cpu) in cpu_to_core_id() argument
1364 np = of_get_cpu_node(cpu, NULL); in cpu_to_core_id()
1376 int cpu_core_index_of_thread(int cpu) in cpu_core_index_of_thread() argument
1378 return cpu >> threads_shift; in cpu_core_index_of_thread()
1391 static struct device_node *cpu_to_l2cache(int cpu) in cpu_to_l2cache() argument
1396 if (!cpu_present(cpu)) in cpu_to_l2cache()
1399 np = of_get_cpu_node(cpu, NULL); in cpu_to_l2cache()
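
cpu_core_index_of_thread() at line 1378 is pure shift arithmetic: threads_shift is log2(threads_per_core), so with 8 threads per core, logical CPUs 16 through 23 all land on core index 2:

#include <stdio.h>

static int threads_shift = 3;               /* log2(threads_per_core = 8) */

static int core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;        /* line 1378's whole body */
}

int main(void)
{
	printf("%d %d %d\n", core_index_of_thread(16),
	       core_index_of_thread(19), core_index_of_thread(23)); /* 2 2 2 */
	return 0;
}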
1410 static bool update_mask_by_l2(int cpu, cpumask_var_t *mask) in update_mask_by_l2() argument
1424 cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu)); in update_mask_by_l2()
1426 for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) { in update_mask_by_l2()
1428 set_cpus_related(i, cpu, cpu_l2_cache_mask); in update_mask_by_l2()
1432 if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) && in update_mask_by_l2()
1433 !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) { in update_mask_by_l2()
1435 cpu); in update_mask_by_l2()
1441 l2_cache = cpu_to_l2cache(cpu); in update_mask_by_l2()
1444 for_each_cpu(i, cpu_sibling_mask(cpu)) in update_mask_by_l2()
1445 set_cpus_related(cpu, i, cpu_l2_cache_mask); in update_mask_by_l2()
1450 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu)); in update_mask_by_l2()
1453 or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask); in update_mask_by_l2()
1456 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu)); in update_mask_by_l2()
1467 or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask); in update_mask_by_l2()
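
update_mask_by_l2() builds cpu_l2_cache_mask either from the device-tree thread-group map (line 1426) or by comparing "l2-cache" phandles, but both paths reduce to symmetric set_cpus_related() links; remove_cpu_from_masks() below clears the same links in reverse on hot-unplug. A sketch with plain 64-bit words in place of cpumask_var_t:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 64

static uint64_t l2_mask[NR_CPUS];

static void set_cpus_related(int i, int j, uint64_t *mask)
{
	mask[i] |= 1ULL << j;               /* mark j in i's mask ... */
	mask[j] |= 1ULL << i;               /* ... and i in j's, symmetrically */
}

int main(void)
{
	/* CPUs 0-3 share an L2: relate each of them to CPU 0 */
	for (int i = 0; i < 4; i++)
		set_cpus_related(0, i, l2_mask);
	printf("cpu0 l2 mask: %#llx\n", (unsigned long long)l2_mask[0]); /* 0xf */
	return 0;
}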
1481 static void remove_cpu_from_masks(int cpu) in remove_cpu_from_masks() argument
1486 unmap_cpu_from_node(cpu); in remove_cpu_from_masks()
1491 for_each_cpu(i, mask_fn(cpu)) { in remove_cpu_from_masks()
1492 set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); in remove_cpu_from_masks()
1493 set_cpus_unrelated(cpu, i, cpu_sibling_mask); in remove_cpu_from_masks()
1495 set_cpus_unrelated(cpu, i, cpu_smallcore_mask); in remove_cpu_from_masks()
1498 for_each_cpu(i, cpu_core_mask(cpu)) in remove_cpu_from_masks()
1499 set_cpus_unrelated(cpu, i, cpu_core_mask); in remove_cpu_from_masks()
1502 for_each_cpu(i, cpu_coregroup_mask(cpu)) in remove_cpu_from_masks()
1503 set_cpus_unrelated(cpu, i, cpu_coregroup_mask); in remove_cpu_from_masks()
1508 static inline void add_cpu_to_smallcore_masks(int cpu) in add_cpu_to_smallcore_masks() argument
1515 cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu)); in add_cpu_to_smallcore_masks()
1517 for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) { in add_cpu_to_smallcore_masks()
1519 set_cpus_related(i, cpu, cpu_smallcore_mask); in add_cpu_to_smallcore_masks()
1523 static void update_coregroup_mask(int cpu, cpumask_var_t *mask) in update_coregroup_mask() argument
1526 int coregroup_id = cpu_to_coregroup_id(cpu); in update_coregroup_mask()
1534 for_each_cpu(i, submask_fn(cpu)) in update_coregroup_mask()
1535 set_cpus_related(cpu, i, cpu_coregroup_mask); in update_coregroup_mask()
1540 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu)); in update_coregroup_mask()
1543 or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask); in update_coregroup_mask()
1546 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu)); in update_coregroup_mask()
1551 or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask); in update_coregroup_mask()
1559 static void add_cpu_to_masks(int cpu) in add_cpu_to_masks() argument
1562 int first_thread = cpu_first_thread_sibling(cpu); in add_cpu_to_masks()
1572 map_cpu_to_node(cpu, cpu_to_node(cpu)); in add_cpu_to_masks()
1573 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); in add_cpu_to_masks()
1574 cpumask_set_cpu(cpu, cpu_core_mask(cpu)); in add_cpu_to_masks()
1578 set_cpus_related(i, cpu, cpu_sibling_mask); in add_cpu_to_masks()
1580 add_cpu_to_smallcore_masks(cpu); in add_cpu_to_masks()
1583 ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu)); in add_cpu_to_masks()
1584 update_mask_by_l2(cpu, &mask); in add_cpu_to_masks()
1587 update_coregroup_mask(cpu, &mask); in add_cpu_to_masks()
1590 chip_id = cpu_to_chip_id(cpu); in add_cpu_to_masks()
1596 or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask); in add_cpu_to_masks()
1599 cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu)); in add_cpu_to_masks()
1603 cpumask_and(mask, mask, cpu_cpu_mask(cpu)); in add_cpu_to_masks()
1607 or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask); in add_cpu_to_masks()
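
The tail of add_cpu_to_masks() widens cpu_core_mask by chip id: every online CPU whose cpu_to_chip_id() value matches becomes a core-mask peer. Sketched here with a fixed chip table (chip_of is illustrative, not a kernel symbol):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

static const int chip_of[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
static uint64_t core_mask[NR_CPUS];

static void add_cpu_to_core_mask(int cpu)
{
	for (int i = 0; i < NR_CPUS; i++) {
		if (chip_of[i] == chip_of[cpu]) {
			core_mask[cpu] |= 1ULL << i;    /* peer into my mask */
			core_mask[i]   |= 1ULL << cpu;  /* me into the peer's */
		}
	}
}

int main(void)
{
	add_cpu_to_core_mask(5);
	printf("%#llx\n", (unsigned long long)core_mask[5]); /* 0xf0: CPUs 4-7 */
	return 0;
}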
1621 unsigned int cpu = raw_smp_processor_id(); in start_secondary() local
1630 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); in start_secondary()
1633 smp_store_cpu_info(cpu); in start_secondary()
1635 rcutree_report_cpu_starting(cpu); in start_secondary()
1636 cpu_callin_map[cpu] = 1; in start_secondary()
1639 smp_ops->setup_cpu(cpu); in start_secondary()
1651 set_numa_node(numa_cpu_lookup_table[cpu]); in start_secondary()
1652 set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); in start_secondary()
1655 add_cpu_to_masks(cpu); in start_secondary()
1663 struct cpumask *mask = cpu_l2_cache_mask(cpu); in start_secondary()
1668 if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu))) in start_secondary()
1673 notify_cpu_starting(cpu); in start_secondary()
1674 set_cpu_online(cpu, true); in start_secondary()
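
start_secondary() deliberately publishes in two phases: cpu_callin_map[cpu] = 1 early (line 1636), so the boot CPU's spin in __cpu_up() can end, but set_cpu_online() only after the topology masks are built (line 1674). A sketch of that ordering with release stores:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int callin, online;

static void start_secondary_sketch(void)
{
	atomic_store_explicit(&callin, 1, memory_order_release); /* cpu_callin_map */
	/* ... build sibling/L2/coregroup masks, notify_cpu_starting() ... */
	atomic_store_explicit(&online, 1, memory_order_release); /* set_cpu_online */
}

int main(void)
{
	start_secondary_sketch();
	printf("callin=%d online=%d\n", atomic_load(&callin), atomic_load(&online));
	return 0;
}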
1749 int arch_asym_cpu_priority(int cpu) in arch_asym_cpu_priority() argument
1752 return -cpu / threads_per_core; in arch_asym_cpu_priority()
1754 return -cpu; in arch_asym_cpu_priority()
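
arch_asym_cpu_priority() returns a descending priority: plain -cpu, or -cpu / threads_per_core so that all threads of one core tie. C's truncation toward zero means CPUs 0-7 map to 0 and CPUs 8-15 to -1 when threads_per_core is 8:

#include <stdio.h>

static int threads_per_core = 8;
static int asym_pack;                       /* stand-in for the static branch */

static int asym_cpu_priority(int cpu)
{
	if (asym_pack)
		return -cpu / threads_per_core; /* whole core shares one priority */
	return -cpu;                            /* else lower CPU id wins */
}

int main(void)
{
	asym_pack = 1;
	printf("%d %d %d\n", asym_cpu_priority(0), asym_cpu_priority(7),
	       asym_cpu_priority(8));           /* 0 0 -1 */
	return 0;
}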
1760 int cpu = smp_processor_id(); in __cpu_disable() local
1773 remove_cpu_from_masks(cpu); in __cpu_disable()
1778 void __cpu_die(unsigned int cpu) in __cpu_die() argument
1784 VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm))); in __cpu_die()
1786 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); in __cpu_die()
1789 smp_ops->cpu_die(cpu); in __cpu_die()