/linux-6.12.1/arch/x86/kernel/cpu/

topology_common.c
     95: apicid : 8;    [in parse_topology(), member]
    105: c->topo.initial_apicid = ebx.apicid;    [in parse_topology()]
    113: c->topo.apicid = c->topo.initial_apicid;    [in parse_topology()]
    115: c->topo.apicid = read_apic_id();    [in parse_topology()]
    146: u32 apicid = c->topo.apicid;    [in topo_set_ids(), local]
    148: c->topo.pkg_id = topo_shift_apicid(apicid, TOPO_PKG_DOMAIN);    [in topo_set_ids()]
    149: c->topo.die_id = topo_shift_apicid(apicid, TOPO_DIE_DOMAIN);    [in topo_set_ids()]
    152: c->topo.logical_pkg_id = topology_get_logical_id(apicid, TOPO_PKG_DOMAIN);    [in topo_set_ids()]
    153: c->topo.logical_die_id = topology_get_logical_id(apicid, TOPO_DIE_DOMAIN);    [in topo_set_ids()]
    157: c->topo.core_id = (apicid & topo_domain_mask(TOPO_PKG_DOMAIN)) >>    [in topo_set_ids()]
    [all …]

topology.c
     78: static void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid)    [in cpu_mark_primary_thread(), argument]
     80: if (!(apicid & (__max_threads_per_core - 1)))    [in cpu_mark_primary_thread()]
     84: static inline void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid) { }    [in cpu_mark_primary_thread(), argument]
     91: static inline u32 topo_apicid(u32 apicid, enum x86_topology_domains dom)    [in topo_apicid(), argument]
     94: return apicid;    [in topo_apicid()]
     95: return apicid & (UINT_MAX << x86_topo_system.dom_shifts[dom - 1]);    [in topo_apicid()]
    325: int topology_get_logical_id(u32 apicid, enum x86_topology_domains at_level)    [in topology_get_logical_id(), argument]
    328: unsigned int lvlid = topo_apicid(apicid, at_level);    [in topology_get_logical_id()]
    357: unsigned int topology_unit_count(u32 apicid, enum x86_topology_domains which_units,    [in topology_unit_count(), argument]
    361: unsigned int lvlid = topo_apicid(apicid, at_level);    [in topology_unit_count()]
    [all …]

topology.h
     26: static inline u32 topo_shift_apicid(u32 apicid, enum x86_topology_domains dom)    [in topo_shift_apicid(), argument]
     29: return apicid;    [in topo_shift_apicid()]
     30: return apicid >> x86_topo_system.dom_shifts[dom - 1];    [in topo_shift_apicid()]
     33: static inline u32 topo_relative_domain_id(u32 apicid, enum x86_topology_domains dom)    [in topo_relative_domain_id(), argument]
     36: apicid >>= x86_topo_system.dom_shifts[dom - 1];    [in topo_relative_domain_id()]
     37: return apicid & (x86_topo_system.dom_size[dom] - 1);    [in topo_relative_domain_id()]
     57: unsigned int topology_unit_count(u32 apicid, enum x86_topology_domains which_units,
     60: static inline unsigned int topology_unit_count(u32 apicid, enum x86_topology_domains which_units,    [in topology_unit_count(), argument]

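The topology_common.c, topology.c and topology.h hits above all revolve around one idea: the x86 APIC ID is a packed bit field in which each topology domain (SMT, core, die, package, ...) owns a contiguous slice, described by a per-domain shift and size. Below is a minimal standalone sketch of that arithmetic; the domain list, the shift/size numbers and the example APIC ID are invented for illustration and are not the kernel's x86_topo_system contents.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative domain list; the kernel's enum x86_topology_domains differs. */
    enum topo_domain { DOM_SMT, DOM_CORE, DOM_PKG, DOM_MAX };

    /* Hypothetical layout: bit 0 = SMT, bits 1-4 = core, bits 5+ = package. */
    static const unsigned int dom_shifts[DOM_MAX] = { 1, 5, 12 };
    static const unsigned int dom_size[DOM_MAX]   = { 2, 16, 128 };

    /* Strip everything below the given domain, like topo_apicid(). */
    static uint32_t topo_apicid(uint32_t apicid, enum topo_domain dom)
    {
        if (dom == DOM_SMT)
            return apicid;
        return apicid & (UINT32_MAX << dom_shifts[dom - 1]);
    }

    /* Shift the APIC ID so the domain's slice starts at bit 0, like topo_shift_apicid(). */
    static uint32_t topo_shift_apicid(uint32_t apicid, enum topo_domain dom)
    {
        if (dom == DOM_SMT)
            return apicid;
        return apicid >> dom_shifts[dom - 1];
    }

    /* Extract only the domain-relative index, like topo_relative_domain_id(). */
    static uint32_t topo_relative_domain_id(uint32_t apicid, enum topo_domain dom)
    {
        if (dom != DOM_SMT)
            apicid >>= dom_shifts[dom - 1];
        return apicid & (dom_size[dom] - 1);
    }

    int main(void)
    {
        uint32_t apicid = 0x2b;    /* package 1, core 5, SMT thread 1 under this layout */

        printf("pkg  %u\n", topo_shift_apicid(apicid, DOM_PKG));
        printf("core %u\n", topo_relative_domain_id(apicid, DOM_CORE));
        printf("smt  %u\n", topo_relative_domain_id(apicid, DOM_SMT));
        printf("masked for core domain: %#x\n", topo_apicid(apicid, DOM_CORE));
        return 0;
    }

The topo_set_ids() lines above are just these helpers applied per domain: pkg_id and die_id come from shifting, the logical IDs from a separate lookup, and core_id from masking off the package bits.
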
cacheinfo.c
    681: c->topo.llc_id = c->topo.apicid >> 3;    [in cacheinfo_amd_init_llc_id()]
    697: c->topo.llc_id = c->topo.apicid >> bits;    [in cacheinfo_amd_init_llc_id()]
    715: c->topo.llc_id = c->topo.apicid >> 3;    [in cacheinfo_hygon_init_llc_id()]
    776: l2_id = c->topo.apicid & ~((1 << index_msb) - 1);    [in init_intel_cacheinfo()]
    782: l3_id = c->topo.apicid & ~((1 << index_msb) - 1);    [in init_intel_cacheinfo()]
    906: unsigned int apicid, nshared, first, last;    [in __cache_amd_cpumap_setup(), local]
    909: apicid = cpu_data(cpu).topo.apicid;    [in __cache_amd_cpumap_setup()]
    910: first = apicid - (apicid % nshared);    [in __cache_amd_cpumap_setup()]
    918: apicid = cpu_data(i).topo.apicid;    [in __cache_amd_cpumap_setup()]
    919: if ((apicid < first) || (apicid > last))    [in __cache_amd_cpumap_setup()]
    [all …]

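The cacheinfo.c hits show how cache instance IDs are carved straight out of the APIC ID: AMD/Hygon shift off the low bits that vary among cores sharing the last-level cache (apicid >> bits), Intel masks those bits off instead (apicid & ~((1 << index_msb) - 1)), and __cache_amd_cpumap_setup() rounds the APIC ID down to a multiple of nshared to get the range of siblings sharing one cache. A small standalone sketch with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Assumed example values, not read from real hardware. */
        uint32_t apicid = 0x1d;       /* this CPU's APIC ID */
        unsigned int bits = 3;        /* log2(threads sharing the LLC), AMD/Hygon path */
        unsigned int index_msb = 3;   /* same quantity as used by the Intel path */
        unsigned int nshared = 8;     /* threads sharing one L3 instance */

        /* AMD/Hygon style: shift off the sharing bits to get a per-LLC ID. */
        uint32_t llc_id = apicid >> bits;

        /* Intel style: keep the upper bits, zero the sharing bits. */
        uint32_t l3_id = apicid & ~((1u << index_msb) - 1);

        /* __cache_amd_cpumap_setup() style: APIC ID range sharing this cache. */
        uint32_t first = apicid - (apicid % nshared);
        uint32_t last  = first + nshared - 1;

        printf("llc_id=%u l3_id=%#x shared range [%#x, %#x]\n",
               llc_id, l3_id, first, last);
        return 0;
    }
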
hygon.c
     26: static int nearby_node(int apicid)    [in nearby_node(), argument]
     30: for (i = apicid - 1; i >= 0; i--) {    [in nearby_node()]
     35: for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {    [in nearby_node()]
     49: unsigned int apicid = c->topo.apicid;    [in srat_detect_node(), local]
     88: node = nearby_node(apicid);    [in srat_detect_node()]

amd.c
    280: static int nearby_node(int apicid)    [in nearby_node(), argument]
    284: for (i = apicid - 1; i >= 0; i--) {    [in nearby_node()]
    289: for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {    [in nearby_node()]
    303: unsigned apicid = c->topo.apicid;    [in srat_detect_node(), local]
    343: node = nearby_node(apicid);    [in srat_detect_node()]

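amd.c and hygon.c contain the same nearby_node() fallback: when SRAT did not record a node for this CPU's APIC ID, walk outward from that APIC ID through the APIC-ID-to-node table until a neighbouring entry has a valid node. The sketch below reproduces only the scan pattern; the table is a tiny stand-in for __apicid_to_node[], and the kernel additionally checks that the candidate node is online and falls back to the first online node rather than node 0.

    #include <stdio.h>

    #define MAX_LOCAL_APIC 16     /* shrunk for the example; the kernel's limit is much larger */
    #define NUMA_NO_NODE   (-1)

    /* Stand-in for the kernel's __apicid_to_node[] array. */
    static int apicid_to_node[MAX_LOCAL_APIC];

    /* Same scan pattern as nearby_node(): look below the given APIC ID first, then above it. */
    static int nearby_node(int apicid)
    {
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
            node = apicid_to_node[i];
            if (node != NUMA_NO_NODE)
                return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
            node = apicid_to_node[i];
            if (node != NUMA_NO_NODE)
                return node;
        }
        return 0;    /* nothing found anywhere: fall back to node 0 in this sketch */
    }

    int main(void)
    {
        int i;

        for (i = 0; i < MAX_LOCAL_APIC; i++)
            apicid_to_node[i] = NUMA_NO_NODE;
        apicid_to_node[4] = 1;    /* pretend only APIC ID 4 has a known node */

        printf("nearby_node(10) = %d\n", nearby_node(10));    /* walks down and finds node 1 */
        return 0;
    }
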
/linux-6.12.1/arch/x86/kernel/apic/

apic_numachip.c
     26: static void (*numachip_apic_icr_write)(int apicid, unsigned int val) __read_mostly;
     49: static void numachip1_apic_icr_write(int apicid, unsigned int val)    [in numachip1_apic_icr_write(), argument]
     51: write_lcsr(CSR_G3_EXT_IRQ_GEN, (apicid << 16) | val);    [in numachip1_apic_icr_write()]
     54: static void numachip2_apic_icr_write(int apicid, unsigned int val)    [in numachip2_apic_icr_write(), argument]
     56: numachip2_write32_lcsr(NUMACHIP2_APIC_ICR, (apicid << 12) | val);    [in numachip2_apic_icr_write()]
     70: int local_apicid, apicid = per_cpu(x86_cpu_to_apicid, cpu);    [in numachip_send_IPI_one(), local]
     77: if (!((apicid ^ local_apicid) >> NUMACHIP_LAPIC_BITS)) {    [in numachip_send_IPI_one()]
     81: __default_send_IPI_dest_field(apicid, vector,    [in numachip_send_IPI_one()]
     90: numachip_apic_icr_write(apicid, dmode | vector);    [in numachip_send_IPI_one()]

x2apic_cluster.c
     12: #define apic_cluster(apicid) ((apicid) >> 4)    [argument]
    111: u32 apicid = apic->cpu_present_to_apicid(cpu_i);    [in prefill_clustermask(), local]
    113: if (apicid == BAD_APICID || cpu_i == cpu || apic_cluster(apicid) != cluster)    [in prefill_clustermask()]
    147: u32 apicid = apic->cpu_present_to_apicid(cpu_i);    [in alloc_clustermask(), local]
    149: if (apicid != BAD_APICID && apic_cluster(apicid) == cluster) {    [in alloc_clustermask()]

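Every x2apic_cluster.c hit uses the apic_cluster() macro quoted above: in x2APIC cluster mode the cluster number is simply the physical APIC ID shifted right by four, and the low four bits select one of up to 16 members of that cluster. A standalone sketch of that split, plus the logical-ID encoding it feeds into (cluster in the upper 16 bits, a one-hot member bit in the lower 16, as described by the SDM); the example APIC ID is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as the apic_cluster() macro: 16 APIC IDs per x2APIC cluster. */
    #define apic_cluster(apicid)    ((apicid) >> 4)

    int main(void)
    {
        uint32_t apicid = 0x35;    /* example physical x2APIC ID */

        uint32_t cluster = apic_cluster(apicid);    /* which 16-wide cluster */
        uint32_t pos     = apicid & 0xf;            /* position inside the cluster */

        /* Logical x2APIC ID: cluster in the upper half, one destination bit
         * per CPU in the lower half. */
        uint32_t logical_id = (cluster << 16) | (1u << pos);

        printf("apicid %#x -> cluster %u, pos %u, logical id %#x\n",
               apicid, cluster, pos, logical_id);
        return 0;
    }
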
x2apic_phys.c
     13: void __init x2apic_set_max_apicid(u32 apicid)    [in x2apic_set_max_apicid(), argument]
     15: x2apic_max_apicid = apicid;    [in x2apic_set_max_apicid()]
     17: apic->max_apic_id = apicid;    [in x2apic_set_max_apicid()]
    110: void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)    [in __x2apic_send_IPI_dest(), argument]
    113: native_x2apic_icr_write(cfg, apicid);    [in __x2apic_send_IPI_dest()]

ipi.c
    305: u32 apicid;    [in safe_smp_processor_id(), local]
    311: apicid = read_apic_id();    [in safe_smp_processor_id()]
    312: if (apicid == BAD_APICID)    [in safe_smp_processor_id()]
    315: cpuid = convert_apicid_to_cpu(apicid);    [in safe_smp_processor_id()]

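safe_smp_processor_id() in ipi.c goes the opposite direction from most of this list: it reads the hardware APIC ID and maps it back to a Linux CPU number via convert_apicid_to_cpu(). A hedged sketch of that reverse lookup; the small cpu_to_apicid table, the NR_CPUS value and the hard-coded APIC ID stand in for the per-CPU x86_cpu_to_apicid map and a real read_apic_id():

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS     4
    #define BAD_APICID  0xffffffffu

    /* Stand-in for the per-CPU x86_cpu_to_apicid map. */
    static const uint32_t cpu_to_apicid[NR_CPUS] = { 0, 2, 4, 6 };

    /* convert_apicid_to_cpu() pattern: linear scan for the CPU owning this APIC ID. */
    static int convert_apicid_to_cpu(uint32_t apicid)
    {
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            if (cpu_to_apicid[cpu] == apicid)
                return cpu;
        }
        return -1;
    }

    int main(void)
    {
        uint32_t apicid = 4;    /* pretend this came from read_apic_id() */

        if (apicid == BAD_APICID)
            return 1;
        printf("APIC ID %u is CPU %d\n", apicid, convert_apicid_to_cpu(apicid));
        return 0;
    }
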
apic_noop.c
     30: static int noop_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip) { return -1; }    [in noop_wakeup_secondary_cpu(), argument]
     32: static u32 noop_get_apic_id(u32 apicid) { return 0; }    [in noop_get_apic_id(), argument]

x2apic_uv_x.c
    696: unsigned long apicid = per_cpu(x86_cpu_to_apicid, cpu);    [in uv_send_IPI_one(), local]
    697: int pnode = uv_apicid_to_pnode(apicid);    [in uv_send_IPI_one()]
    706: (apicid << UVH_IPI_INT_APIC_ID_SHFT) |    [in uv_send_IPI_one()]
   1484: int i, lnid, apicid;    [in build_socket_tables(), local]
   1535: for (apicid = 0; apicid < ARRAY_SIZE(__apicid_to_node); apicid++) {    [in build_socket_tables()]
   1536: int nid = __apicid_to_node[apicid];    [in build_socket_tables()]
   1543: sockid = apicid >> uv_cpuid.socketid_shift;    [in build_socket_tables()]
   1553: apicid,    [in build_socket_tables()]
   1757: int apicid = per_cpu(x86_cpu_to_apicid, cpu);    [in uv_system_init_hub(), local]
   1761: pnode = uv_apicid_to_pnode(apicid);    [in uv_system_init_hub()]

/linux-6.12.1/arch/x86/mm/

amdtopology.c
     58: unsigned int numnodes, cores, apicid;    [in amd_numa_init(), local]
    165: apicid = boot_cpu_physical_apicid;    [in amd_numa_init()]
    166: if (apicid > 0)    [in amd_numa_init()]
    167: pr_info("BSP APIC ID: %02x\n", apicid);    [in amd_numa_init()]
    170: for (j = 0; j < cores; j++, apicid++)    [in amd_numa_init()]
    171: set_apicid_to_node(apicid, i);    [in amd_numa_init()]

numa.c
     52: u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);    [in numa_cpu_node(), local]
     54: if (apicid != BAD_APICID)    [in numa_cpu_node()]
     55: return __apicid_to_node[apicid];    [in numa_cpu_node()]

/linux-6.12.1/arch/x86/include/asm/

apicdef.h
    170: #define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)    [argument]
    171: #define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)    [argument]
    172: #define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)    [argument]

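The three apicdef.h macros implement xAPIC logical-destination cluster addressing: an 8-bit logical APIC ID whose upper nibble is the cluster number and whose lower nibble is a per-CPU bit mask within the cluster. The sketch below spells the masks out as constants (in the header they are derived from XAPIC_DEST_CPUS_SHIFT and friends) and decodes one example value:

    #include <stdint.h>
    #include <stdio.h>

    /* xAPIC cluster model: 8-bit logical ID = 4-bit cluster | 4-bit CPU bit mask. */
    #define XAPIC_DEST_CPUS_SHIFT   4
    #define XAPIC_DEST_CPUS_MASK    ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)               /* 0x0f */
    #define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)   /* 0xf0 */

    #define APIC_CLUSTER(apicid)    ((apicid) & XAPIC_DEST_CLUSTER_MASK)
    #define APIC_CLUSTERID(apicid)  (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
    #define APIC_CPUID(apicid)      ((apicid) & XAPIC_DEST_CPUS_MASK)

    int main(void)
    {
        uint32_t logical = 0x32;    /* cluster 3, CPU bits 0b0010 */

        printf("cluster field %#x, cluster id %u, cpu bits %#x\n",
               APIC_CLUSTER(logical), APIC_CLUSTERID(logical), APIC_CPUID(logical));
        return 0;
    }
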
numa.h
     26: static inline void set_apicid_to_node(int apicid, s16 node)    [in set_apicid_to_node(), argument]
     28: __apicid_to_node[apicid] = node;    [in set_apicid_to_node()]
     34: static inline void set_apicid_to_node(int apicid, s16 node)    [in set_apicid_to_node(), argument]

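amdtopology.c, numa.c and numa.h together show the life cycle of the __apicid_to_node[] table: early NUMA code records a node for each APIC ID with set_apicid_to_node(), and numa_cpu_node() later returns the node for a CPU by looking up its APIC ID. A condensed standalone sketch of that round trip; the array size, the BAD_APICID value and the fact that the lookup takes an APIC ID directly (the kernel resolves the CPU's APIC ID first) are simplifications:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_LOCAL_APIC  32            /* placeholder size */
    #define BAD_APICID      0xffffffffu
    #define NUMA_NO_NODE    (-1)

    static int16_t apicid_to_node[MAX_LOCAL_APIC];    /* stand-in for __apicid_to_node[] */

    static void set_apicid_to_node(uint32_t apicid, int16_t node)
    {
        apicid_to_node[apicid] = node;
    }

    /* numa_cpu_node() pattern: map an APIC ID to its NUMA node. */
    static int numa_cpu_node(uint32_t apicid)
    {
        if (apicid != BAD_APICID && apicid < MAX_LOCAL_APIC)
            return apicid_to_node[apicid];
        return NUMA_NO_NODE;
    }

    int main(void)
    {
        uint32_t apicid;
        int i;

        for (i = 0; i < MAX_LOCAL_APIC; i++)
            apicid_to_node[i] = NUMA_NO_NODE;

        /* amd_numa_init() pattern: consecutive APIC IDs of one node's cores. */
        for (apicid = 8; apicid < 8 + 4; apicid++)
            set_apicid_to_node(apicid, 1);

        printf("APIC ID 9 -> node %d\n", numa_cpu_node(9));
        printf("APIC ID 0 -> node %d\n", numa_cpu_node(0));
        return 0;
    }
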
mpspec.h
     66: static inline void reset_phys_cpu_present_map(u32 apicid)    [in reset_phys_cpu_present_map(), argument]
     69: set_bit(apicid, phys_cpu_present_map);    [in reset_phys_cpu_present_map()]

apic.h
    246: extern void __init x2apic_set_max_apicid(u32 apicid);
    316: int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip);
    318: int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip);
    336: int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip);
    337: int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip);
    523: typedef int (*wakeup_cpu_handler)(int apicid, unsigned long start_eip);

mpspec_def.h
     70: unsigned char apicid; /* Local APIC number */    [member]
    108: unsigned char apicid;    [member]

/linux-6.12.1/include/trace/events/

mce.h
     40: __field( u32, apicid )
     62: __entry->apicid = m->apicid;
     85: __entry->apicid,

/linux-6.12.1/arch/x86/hyperv/

hv_vtl.c
    209: static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)    [in hv_vtl_wakeup_secondary_cpu(), argument]
    215: if (arch_match_cpu_phys_id(cpu, apicid))    [in hv_vtl_wakeup_secondary_cpu()]
    221: pr_debug("Bringing up CPU with APIC ID %d in VTL2...\n", apicid);    [in hv_vtl_wakeup_secondary_cpu()]
    222: vp_id = hv_vtl_apicid_to_vp_id(apicid);    [in hv_vtl_wakeup_secondary_cpu()]
    225: pr_err("Couldn't find CPU with APIC ID %d\n", apicid);    [in hv_vtl_wakeup_secondary_cpu()]
    229: pr_err("Invalid CPU id %d for APIC ID %d\n", vp_id, apicid);    [in hv_vtl_wakeup_secondary_cpu()]

/linux-6.12.1/arch/x86/kernel/

smpboot.c
    814: static void announce_cpu(int cpu, int apicid)    [in announce_cpu(), argument]
    847: node, cpu, apicid);    [in announce_cpu()]
    878: static int do_boot_cpu(u32 apicid, int cpu, struct task_struct *idle)    [in do_boot_cpu(), argument]
    902: announce_cpu(cpu, apicid);    [in do_boot_cpu()]
    932: ret = apic->wakeup_secondary_cpu_64(apicid, start_ip);    [in do_boot_cpu()]
    934: ret = apic->wakeup_secondary_cpu(apicid, start_ip);    [in do_boot_cpu()]
    936: ret = wakeup_secondary_cpu_via_init(apicid, start_ip);    [in do_boot_cpu()]
    946: u32 apicid = apic->cpu_present_to_apicid(cpu);    [in native_kick_ap(), local]
    953: if (apicid == BAD_APICID || !apic_id_valid(apicid)) {    [in native_kick_ap()]
    954: pr_err("CPU %u has invalid APIC ID %x. Aborting bringup\n", cpu, apicid);    [in native_kick_ap()]
    [all …]

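The do_boot_cpu() hits in smpboot.c show the bringup dispatch order: use the APIC driver's 64-bit wakeup hook if it exists, otherwise the 32-bit hook, otherwise fall back to the classic INIT/SIPI sequence. A hedged sketch of just that dispatch; struct apic_ops, kick_cpu() and the stub wakeup function are invented names standing in for the real apic structure and wakeup_secondary_cpu_via_init():

    #include <stdio.h>

    /* Stand-ins for the APIC driver hooks and the INIT/SIPI fallback. */
    typedef int (*wakeup_cpu_handler)(int apicid, unsigned long start_eip);

    struct apic_ops {
        wakeup_cpu_handler wakeup_secondary_cpu;
        wakeup_cpu_handler wakeup_secondary_cpu_64;
    };

    static int wakeup_via_init(int apicid, unsigned long start_ip)
    {
        printf("INIT/SIPI to APIC ID %d, start_ip %#lx\n", apicid, start_ip);
        return 0;
    }

    /* do_boot_cpu() dispatch order: prefer the 64-bit hook, then the 32-bit hook,
     * then fall back to INIT/SIPI. */
    static int kick_cpu(const struct apic_ops *apic, int apicid, unsigned long start_ip)
    {
        if (apic->wakeup_secondary_cpu_64)
            return apic->wakeup_secondary_cpu_64(apicid, start_ip);
        if (apic->wakeup_secondary_cpu)
            return apic->wakeup_secondary_cpu(apicid, start_ip);
        return wakeup_via_init(apicid, start_ip);
    }

    int main(void)
    {
        struct apic_ops plain = { 0 };    /* no special hooks: uses INIT/SIPI */

        return kick_cpu(&plain, 5, 0x9a000);
    }
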
/linux-6.12.1/arch/x86/platform/uv/

uv_time.c
     73: unsigned long apicid, val;    [in uv_rtc_send_IPI(), local]
     76: apicid = cpu_physical_id(cpu);    [in uv_rtc_send_IPI()]
     77: pnode = uv_apicid_to_pnode(apicid);    [in uv_rtc_send_IPI()]
     79: (apicid << UVH_IPI_INT_APIC_ID_SHFT) |    [in uv_rtc_send_IPI()]
     96: unsigned long apicid = cpu_physical_id(cpu);    [in uv_setup_intr(), local]
    107: ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);    [in uv_setup_intr()]

/linux-6.12.1/arch/x86/kernel/acpi/

madt_wakeup.c
     40: u32 apicid = per_cpu(x86_cpu_to_apicid, cpu);    [in acpi_mp_cpu_die(), local]
     49: acpi_mp_wake_mailbox->apic_id = apicid;    [in acpi_mp_cpu_die()]
    172: static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip)    [in acpi_wakeup_cpu(), argument]
    200: acpi_mp_wake_mailbox->apic_id = apicid;    [in acpi_wakeup_cpu()]

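madt_wakeup.c drives the ACPI multiprocessor wakeup mailbox: to start (or park) a CPU, the kernel writes the target's APIC ID and an entry point into firmware-provided mailbox memory and then sets the wakeup command for the waiting AP to observe. The sketch below mirrors only that handshake; the struct layout, the command constant and the plain stores are simplified stand-ins for the spec-defined mailbox structure and the careful ordering the real code performs:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified view of the ACPI MP wakeup mailbox; the real structure is
     * defined by the ACPI spec and lives in firmware-provided memory. */
    struct mp_wakeup_mailbox {
        volatile uint16_t command;
        uint16_t reserved;
        uint32_t apic_id;
        uint64_t wakeup_vector;
    };

    #define MP_WAKE_COMMAND_WAKEUP  1    /* illustrative value */

    static struct mp_wakeup_mailbox mailbox;    /* stand-in for acpi_mp_wake_mailbox */

    /* acpi_wakeup_cpu() pattern: fill in the target APIC ID and entry point,
     * then publish the wakeup command for the parked AP to see. */
    static void wakeup_cpu(uint32_t apicid, uint64_t start_ip)
    {
        mailbox.apic_id = apicid;
        mailbox.wakeup_vector = start_ip;
        mailbox.command = MP_WAKE_COMMAND_WAKEUP;    /* must be written last */
    }

    int main(void)
    {
        wakeup_cpu(7, 0x9a000);
        printf("mailbox: apic_id=%u vector=%#llx command=%u\n",
               mailbox.apic_id,
               (unsigned long long)mailbox.wakeup_vector,
               mailbox.command);
        return 0;
    }
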
/linux-6.12.1/arch/x86/kvm/

trace.h
    543: TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
    544: TP_ARGS(apicid, dm, tm, vec),
    547: __field( __u32, apicid )
    554: __entry->apicid = apicid;
    561: __entry->apicid, __entry->vec,
    571: __field( __u32, apicid )
    576: __entry->apicid = apic->vcpu->vcpu_id;
    580: TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
    588: __field( __u32, apicid )
    593: __entry->apicid = apic->vcpu->vcpu_id;
    [all …]