Lines Matching +full:odd +full:- +full:numbered

1 // SPDX-License-Identifier: GPL-2.0-or-later
10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
116 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
117 * the set of its siblings that share the L1-cache.
122 * On some big-core systems, thread_group_l2_cache_map for each CPU
124 * L2-cache.
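A minimal sketch, assuming the per-CPU cpumask idiom smp.c already uses, of how these two maps are declared and consumed; shares_l1_cache() is a hypothetical helper added purely for illustration:

	/* One mask per CPU, filled from "ibm,thread-groups" at boot. */
	static DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
	static DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);

	/* Hypothetical helper: does @that_cpu share an L1 with @this_cpu? */
	static inline bool shares_l1_cache(int this_cpu, int that_cpu)
	{
		return cpumask_test_cpu(that_cpu,
					per_cpu(thread_group_l1_cache_map, this_cpu));
	}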
149 /* Special case - we inhibit secondary thread startup in smp_generic_cpu_bootable()
168 return -EINVAL; in smp_generic_kick_cpu()
172 * cpu_start field to become non-zero. After we set cpu_start, in smp_generic_kick_cpu()
175 if (!paca_ptrs[nr]->cpu_start) { in smp_generic_kick_cpu()
176 paca_ptrs[nr]->cpu_start = 1; in smp_generic_kick_cpu()
183 * Ok it's not there, so it might be soft-unplugged, let's in smp_generic_kick_cpu()
235 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
243 [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
256 return -EINVAL; in smp_request_message_ipi()
280 char *message = (char *)&info->messages; in smp_muxed_ipi_set_message()
297 smp_ops->cause_ipi(cpu); in smp_muxed_ipi_message_pass()
301 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
313 /* sync-free variant. Callers should ensure synchronization */
321 all = xchg(&info->messages, 0); in smp_ipi_demux_relaxed()
345 } while (READ_ONCE(info->messages)); in smp_ipi_demux_relaxed()
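The muxed-IPI path above packs one message per byte of a single long: the sender does a byte-wide store into info->messages, and the receiver claims everything pending in one xchg(). A worked sketch of the resulting bit positions, assuming the 64-bit big-endian layout that the shift shown above corresponds to; demux_sketch() is a stand-in name modelled broadly on the dispatch in smp_ipi_demux_relaxed():

	/*
	 * Byte A of the big-endian long is message A, so storing 1 into it
	 * sets bit (BITS_PER_LONG - 8) - 8 * A of the word:
	 *   IPI_MESSAGE(0) == 1UL << 56   (most significant byte)
	 *   IPI_MESSAGE(1) == 1UL << 48
	 *   IPI_MESSAGE(2) == 1UL << 40
	 */
	static void demux_sketch(unsigned long all)
	{
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
	}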
353 if (smp_ops->message_pass) in do_message_pass()
354 smp_ops->message_pass(cpu, msg); in do_message_pass()
392 * concurrency or re-entrancy.
471 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu)) in do_smp_send_nmi_ipi()
488 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
489 * - fn is the target callback function.
490 * - delay_us > 0 is the delay before giving up waiting for targets to
538 delay_us--; in __smp_send_nmi_ipi()
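A hedged usage sketch of the interface documented above: smp_send_nmi_ipi() is the exported wrapper around __smp_send_nmi_ipi(), while dump_regs_nmi_cb() and poke_stuck_cpu() are made-up names for illustration, and the sketch assumes the nonzero-on-success return convention:

	/* Hypothetical callback: runs on the target CPU in NMI context. */
	static void dump_regs_nmi_cb(struct pt_regs *regs)
	{
		pr_emerg("CPU%d: NIP %lx\n", smp_processor_id(), regs->nip);
	}

	static void poke_stuck_cpu(int cpu)
	{
		/* Ask @cpu to run the callback; give up after ~1000us. */
		if (!smp_send_nmi_ipi(cpu, dump_regs_nmi_cb, 1000))
			pr_warn("CPU%d did not respond to NMI IPI\n", cpu);
	}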
622 * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before in crash_smp_send_stop()
707 = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; in smp_store_cpu_info()
712 * Relationships between CPUs are maintained in a set of per-cpu cpumasks so
754 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
761 * output of "ibm,thread-groups" is stored.
763 * ibm,thread-groups[0..N-1] array defines which group of threads in
764 * the CPU-device node can be grouped together based on the property.
768 * ibm,thread-groups[i + 0] tells us the property based on which the
774 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
775 * property ibm,thread-groups[i]
777 * ibm,thread-groups[i+2] tells us the number of threads in each such
779 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
781 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
782 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
786 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
794 * each with "4" threads. The "ibm,ppc-interrupt-server#s" of
796 * "ibm,ppc-interrupt-server#s" of the second group is
802 * each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
804 * "ibm,ppc-interrupt-server#s" of the second group is
806 * group share the L2-cache.
808 * Returns 0 on success, -EINVAL if the property does not exist,
809 * -ENODATA if property does not have a value, and -EOVERFLOW if the
822 count = of_property_count_u32_elems(dn, "ibm,thread-groups"); in parse_thread_groups()
824 ret = of_property_read_u32_array(dn, "ibm,thread-groups", in parse_thread_groups()
831 struct thread_groups *tg = &tglp->property_tgs[property_idx++]; in parse_thread_groups()
833 tg->property = thread_group_array[i]; in parse_thread_groups()
834 tg->nr_groups = thread_group_array[i + 1]; in parse_thread_groups()
835 tg->threads_per_group = thread_group_array[i + 2]; in parse_thread_groups()
836 total_threads = tg->nr_groups * tg->threads_per_group; in parse_thread_groups()
841 tg->thread_list[j] = thread_list[j]; in parse_thread_groups()
845 tglp->nr_properties = property_idx; in parse_thread_groups()
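As a worked illustration of the flattened layout documented above, a self-contained sketch that walks an array such as the [1,2,4,8,10,12,14,9,11,13,15,2,2,4,...] example; parse_flat_array() is a hypothetical helper, not the kernel's parse_thread_groups():

	/*
	 * Each block is [property, nr_groups, threads_per_group, thread_list...],
	 * so the next block starts nr_groups * threads_per_group + 3 entries on.
	 */
	static void parse_flat_array(const u32 *tg, int len)
	{
		int i = 0;

		while (i + 3 <= len) {
			u32 property  = tg[i];     /* 1 -> L1 sharing, 2 -> L2 sharing */
			u32 nr_groups = tg[i + 1];
			u32 per_group = tg[i + 2];
			u32 k = nr_groups * per_group;

			if (i + 3 + k > len)
				break;	/* malformed property block */

			pr_info("property %u: %u groups of %u threads, first id %u\n",
				property, nr_groups, per_group, k ? tg[i + 3] : 0);

			i += 3 + k;
		}
	}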
853 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
857 * @tg : The thread-group structure of the CPU node to which @cpu belongs.
860 * Returns the index to tg->thread_list that points to the start
863 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
864 * tg->thread_list.
871 for (i = 0; i < tg->nr_groups; i++) { in get_cpu_thread_group_start()
872 int group_start = i * tg->threads_per_group; in get_cpu_thread_group_start()
874 for (j = 0; j < tg->threads_per_group; j++) { in get_cpu_thread_group_start()
877 if (tg->thread_list[idx] == hw_cpu_id) in get_cpu_thread_group_start()
882 return -1; in get_cpu_thread_group_start()
896 *err = -ENODATA; in get_thread_groups()
900 if (!cpu_tgl->nr_properties) { in get_thread_groups()
906 for (i = 0; i < cpu_tgl->nr_properties; i++) { in get_thread_groups()
907 if (cpu_tgl->property_tgs[i].property == group_property) { in get_thread_groups()
908 tg = &cpu_tgl->property_tgs[i]; in get_thread_groups()
914 *err = -EINVAL; in get_thread_groups()
931 if (unlikely(i_group_start == -1)) { in update_mask_from_threadgroup()
933 return -ENODATA; in update_mask_from_threadgroup()
946 int cpu_group_start = -1, err = 0; in init_thread_group_cache_map()
952 return -EINVAL; in init_thread_group_cache_map()
961 if (unlikely(cpu_group_start == -1)) { in init_thread_group_cache_map()
963 return -ENODATA; in init_thread_group_cache_map()
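Condensing the init_thread_group_cache_map()/update_mask_from_threadgroup() fragments above into one hedged sketch of how a per-CPU cache mask gets populated; build_cache_map_sketch() is a made-up name, and the real code splits the work across the two functions with more careful allocation and error handling:

	static int build_cache_map_sketch(int cpu, struct thread_groups *tg,
					  cpumask_var_t *mask)
	{
		int first = cpu_first_thread_sibling(cpu);
		int cpu_group_start = get_cpu_thread_group_start(cpu, tg);
		int i;

		if (cpu_group_start == -1)
			return -ENODATA;

		if (!zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu)))
			return -ENOMEM;

		/* Two threads are cache siblings iff their group starts match. */
		for (i = first; i < first + threads_per_core; i++) {
			int i_group_start = get_cpu_thread_group_start(i, tg);

			if (i_group_start == -1)
				return -ENODATA;

			if (i_group_start == cpu_group_start)
				cpumask_set_cpu(i, *mask);
		}
		return 0;
	}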
999 * independent thread groups per core), prefer lower numbered CPUs, so
1005 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1028 * returns a non-const pointer and the compiler barfs on that.
1145 if (cpu_to_chip_id(boot_cpuid) != -1) { in smp_prepare_cpus()
1151 * Assumption: if boot_cpuid doesn't have a chip-id, then no in smp_prepare_cpus()
1152 * other CPUs will have a chip-id either. in smp_prepare_cpus()
1156 memset(chip_id_lookup_table, -1, sizeof(int) * idx); in smp_prepare_cpus()
1159 if (smp_ops && smp_ops->probe) in smp_prepare_cpus()
1160 smp_ops->probe(); in smp_prepare_cpus()
1173 paca_ptrs[boot_cpuid]->__current = current; in smp_prepare_boot_cpu()
1186 return -EBUSY; in generic_cpu_disable()
1190 vdso_data->processorCount--; in generic_cpu_disable()
1262 paca_ptrs[cpu]->__current = idle; in cpu_idle_thread_init()
1263 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) + in cpu_idle_thread_init()
1264 THREAD_SIZE - STACK_FRAME_MIN_SIZE; in cpu_idle_thread_init()
1266 task_thread_info(idle)->cpu = cpu; in cpu_idle_thread_init()
1284 return -EBUSY; in __cpu_up()
1287 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) in __cpu_up()
1288 return -EINVAL; in __cpu_up()
1296 if (smp_ops->prepare_cpu) { in __cpu_up()
1297 rc = smp_ops->prepare_cpu(cpu); in __cpu_up()
1302 /* Make sure the callin-map entry is 0 (can be left over from a CPU in __cpu_up()
1315 rc = smp_ops->kick_cpu(cpu); in __cpu_up()
1342 return -ENOENT; in __cpu_up()
1347 if (smp_ops->give_timebase) in __cpu_up()
1348 smp_ops->give_timebase(); in __cpu_up()
1362 int id = -1; in cpu_to_core_id()
1420 * If the threads in a thread-group share L2 cache, then the in update_mask_by_l2()
1421 * L2-mask can be obtained from thread_group_l2_cache_map. in update_mask_by_l2()
1431 /* Verify that L1-cache siblings are a subset of L2 cache-siblings */ in update_mask_by_l2()
1452 /* Update l2-cache mask with all the CPUs that are part of submask */ in update_mask_by_l2()
1455 /* Skip all CPUs already part of current CPU l2-cache mask */ in update_mask_by_l2()
1465 /* Skip all CPUs already part of current CPU l2-cache */ in update_mask_by_l2()
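One way the L1-within-L2 containment check described above could be expressed, reusing the map names from earlier in this file (sketch only, not the code update_mask_by_l2() actually uses):

	if (!cpumask_subset(per_cpu(thread_group_l1_cache_map, cpu),
			    per_cpu(thread_group_l2_cache_map, cpu)))
		pr_warn_once("CPU %d: L1 siblings are not a subset of L2 siblings\n", cpu);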
1564 int chip_id = -1; in add_cpu_to_masks()
1582 /* In the CPU-hotplug path, hence use GFP_ATOMIC */ in add_cpu_to_masks()
1601 /* If chip_id is -1; limit the cpu_core_mask to within PKG */ in add_cpu_to_masks()
1602 if (chip_id == -1) in add_cpu_to_masks()
1628 current->active_mm = &init_mm; in start_secondary()
1638 if (smp_ops->setup_cpu) in start_secondary()
1639 smp_ops->setup_cpu(cpu); in start_secondary()
1640 if (smp_ops->take_timebase) in start_secondary()
1641 smp_ops->take_timebase(); in start_secondary()
1647 vdso_data->processorCount++; in start_secondary()
1659 * per-core basis because one core in the pair might be disabled. in start_secondary()
1724 BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1); in build_sched_topology()
1734 if (smp_ops && smp_ops->setup_cpu) in smp_cpus_done()
1735 smp_ops->setup_cpu(boot_cpuid); in smp_cpus_done()
1737 if (smp_ops && smp_ops->bringup_done) in smp_cpus_done()
1738 smp_ops->bringup_done(); in smp_cpus_done()
1745 * For asym packing, by default a lower-numbered CPU has higher priority.
1746 * On shared processors, pack onto the lower-numbered core. However, avoid moving
1752 return -cpu / threads_per_core; in arch_asym_cpu_priority()
1754 return -cpu; in arch_asym_cpu_priority()
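A quick worked example of the formula above, assuming threads_per_core == 8: on a shared processor, CPUs 0-7 all evaluate -cpu / 8 to 0 and CPUs 8-15 to -1, so every thread of a core shares one priority and load is packed core by core; on dedicated processors the plain -cpu values (0, -1, ..., -15) keep individual threads distinguishable.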
1763 if (!smp_ops->cpu_disable) in __cpu_disable()
1764 return -ENOSYS; in __cpu_disable()
1768 err = smp_ops->cpu_disable(); in __cpu_disable()
1788 if (smp_ops->cpu_die) in __cpu_die()
1789 smp_ops->cpu_die(cpu); in __cpu_die()
1795 * Disable on the down path. This will be re-enabled by in arch_cpu_idle_dead()
1800 if (smp_ops->cpu_offline_self) in arch_cpu_idle_dead()
1801 smp_ops->cpu_offline_self(); in arch_cpu_idle_dead()
1803 /* If we return, we re-enter start_secondary */ in arch_cpu_idle_dead()