Searched refs:cpumask (Results 1 – 25 of 653) sorted by relevance

/linux-6.12.1/kernel/bpf/
cpumask.c
26 cpumask_t cpumask; member
51 struct bpf_cpumask *cpumask; in bpf_cpumask_create() local
54 BUILD_BUG_ON(offsetof(struct bpf_cpumask, cpumask) != 0); in bpf_cpumask_create()
56 cpumask = bpf_mem_cache_alloc(&bpf_cpumask_ma); in bpf_cpumask_create()
57 if (!cpumask) in bpf_cpumask_create()
60 memset(cpumask, 0, sizeof(*cpumask)); in bpf_cpumask_create()
61 refcount_set(&cpumask->usage, 1); in bpf_cpumask_create()
63 return cpumask; in bpf_cpumask_create()
75 __bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) in bpf_cpumask_acquire() argument
77 refcount_inc(&cpumask->usage); in bpf_cpumask_acquire()
[all …]
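For orientation, here is a minimal sketch of the allocate/acquire/release lifecycle these hits implement, written as a standalone BPF program. The attach point and program name are illustrative, and the kfuncs are declared explicitly here (the selftests pull the same declarations from cpumask_common.h, shown below):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(cpumask_lifecycle, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask, *extra;

	mask = bpf_cpumask_create();        /* refcount starts at 1 */
	if (!mask)
		return 0;

	extra = bpf_cpumask_acquire(mask);  /* 1 -> 2 */
	bpf_cpumask_release(extra);         /* 2 -> 1 */
	bpf_cpumask_release(mask);          /* 1 -> 0, freed via RCU */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";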
/linux-6.12.1/tools/testing/selftests/bpf/progs/
cpumask_common.h
21 struct bpf_cpumask __kptr * cpumask; member
32 void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym __weak;
33 struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym __weak;
34 u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym __weak;
35 u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym __weak;
36 u32 bpf_cpumask_first_and(const struct cpumask *src1,
37 const struct cpumask *src2) __ksym __weak;
38 void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
39 void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
40 bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym __weak;
[all …]
cpumask_success.c
141 struct bpf_cpumask *cpumask; in BPF_PROG() local
146 cpumask = create_cpumask(); in BPF_PROG()
147 if (!cpumask) in BPF_PROG()
150 bpf_cpumask_release(cpumask); in BPF_PROG()
157 struct bpf_cpumask *cpumask; in BPF_PROG() local
162 cpumask = create_cpumask(); in BPF_PROG()
163 if (!cpumask) in BPF_PROG()
166 bpf_cpumask_set_cpu(0, cpumask); in BPF_PROG()
167 if (!bpf_cpumask_test_cpu(0, cast(cpumask))) { in BPF_PROG()
172 bpf_cpumask_clear_cpu(0, cpumask); in BPF_PROG()
[all …]
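The success tests above exercise the set/test/clear pattern. A hedged sketch of the same flow, assuming the headers and kfunc declarations of the previous sketch plus bpf_cpumask_set_cpu(), bpf_cpumask_clear_cpu(), and bpf_cpumask_test_cpu() declared as in cpumask_common.h:

SEC("tp_btf/task_newtask")
int BPF_PROG(cpumask_set_clear, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	mask = bpf_cpumask_create();
	if (!mask)
		return 0;

	bpf_cpumask_set_cpu(0, mask);
	/* Read-only kfuncs take a const struct cpumask *, hence the cast;
	 * the selftests hide it behind a cast() helper. */
	if (!bpf_cpumask_test_cpu(0, (const struct cpumask *)mask))
		bpf_printk("cpu 0 unexpectedly clear");

	bpf_cpumask_clear_cpu(0, mask);
	bpf_cpumask_release(mask);
	return 0;
}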
cpumask_failure.c
38 struct bpf_cpumask *cpumask; in BPF_PROG() local
40 cpumask = create_cpumask(); in BPF_PROG()
41 __sink(cpumask); in BPF_PROG()
51 struct bpf_cpumask *cpumask; in BPF_PROG() local
53 cpumask = create_cpumask(); in BPF_PROG()
56 bpf_cpumask_release(cpumask); in BPF_PROG()
57 bpf_cpumask_release(cpumask); in BPF_PROG()
66 struct bpf_cpumask *cpumask; in BPF_PROG() local
69 cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr); in BPF_PROG()
70 __sink(cpumask); in BPF_PROG()
[all …]
/linux-6.12.1/include/linux/
cpumask.h
115 extern struct cpumask __cpu_possible_mask;
116 extern struct cpumask __cpu_online_mask;
117 extern struct cpumask __cpu_enabled_mask;
118 extern struct cpumask __cpu_present_mask;
119 extern struct cpumask __cpu_active_mask;
120 extern struct cpumask __cpu_dying_mask;
121 #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
122 #define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
123 #define cpu_enabled_mask ((const struct cpumask *)&__cpu_enabled_mask)
124 #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
[all …]
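A small sketch of how kernel code typically consumes the global masks declared here; demo_walk_online() is a hypothetical helper:

#include <linux/cpumask.h>
#include <linux/printk.h>

static void demo_walk_online(void)
{
	unsigned int cpu;

	/* for_each_cpu() visits every set bit in the mask. */
	for_each_cpu(cpu, cpu_online_mask)
		pr_info("cpu%u is online\n", cpu);

	pr_info("%u of %u possible CPUs online\n",
		num_online_cpus(), num_possible_cpus());
}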
pm_opp.h
194 int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
195 int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
197 void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
425 static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) in dev_pm_opp_set_sharing_cpus() argument
430 static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) in dev_pm_opp_get_sharing_cpus() argument
439 static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) in dev_pm_opp_cpumask_remove_table() argument
471 int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
472 void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
473 int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
479 int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
[all …]
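A hedged sketch of how these OPP helpers are usually paired in a cpufreq-style probe path; demo_share_opp_table() and its parameters are hypothetical:

#include <linux/cpu.h>
#include <linux/pm_opp.h>

static int demo_share_opp_table(unsigned int cpu, struct cpumask *cpus)
{
	struct device *cpu_dev = get_cpu_device(cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* Fill "cpus" with the CPUs that DT says share cpu_dev's OPP
	 * table, then mark the table as shared between them. */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, cpus);
	if (ret)
		return ret;

	return dev_pm_opp_set_sharing_cpus(cpu_dev, cpus);
}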
stop_machine.h
38 void stop_machine_yield(const struct cpumask *cpumask);
114 int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
125 int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
144 const struct cpumask *cpus);
148 const struct cpumask *cpus) in stop_machine_cpuslocked()
159 stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) in stop_machine()
166 const struct cpumask *cpus) in stop_machine_from_inactive_cpu()
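A minimal sketch of calling stop_machine(); demo_quiescent_step() is a hypothetical callback. Passing NULL for the cpumask runs the function on the first online CPU while the others spin, as multi_cpu_stop() below shows:

#include <linux/stop_machine.h>

/* Runs while every online CPU is parked in its stopper thread with
 * interrupts disabled, so the machine is quiescent; must not sleep. */
static int demo_quiescent_step(void *arg)
{
	return 0;
}

static int demo_run_quiescent(void)
{
	return stop_machine(demo_quiescent_step, NULL, NULL);
}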
nmi.h
26 extern struct cpumask watchdog_cpumask;
172 static inline bool trigger_cpumask_backtrace(struct cpumask *mask) in trigger_cpumask_backtrace()
199 static inline bool trigger_cpumask_backtrace(struct cpumask *mask) in trigger_cpumask_backtrace()
226 void nmi_backtrace_stall_snap(const struct cpumask *btp);
227 void nmi_backtrace_stall_check(const struct cpumask *btp);
229 static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {} in nmi_backtrace_stall_snap()
230 static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {} in nmi_backtrace_stall_check()
/linux-6.12.1/tools/sched_ext/include/scx/
common.bpf.h
62 const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
63 const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
64 void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
65 const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
66 const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
67 void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
292 struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
293 void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
294 u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
295 u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
[all …]
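A hedged fragment showing the get/put discipline for the sched_ext idle-mask kfuncs, written as a hypothetical ops.select_cpu() implementation assuming the common.bpf.h declarations above; handling for the no-idle-CPU case is elided:

SEC("struct_ops/demo_select_cpu")
s32 BPF_STRUCT_OPS(demo_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	const struct cpumask *idle;
	s32 cpu = prev_cpu;

	idle = scx_bpf_get_idle_cpumask();          /* takes a reference */
	if (!bpf_cpumask_test_cpu(prev_cpu, idle))  /* prev CPU busy? */
		cpu = bpf_cpumask_first(idle);      /* try the first idle one */
	scx_bpf_put_idle_cpumask(idle);             /* every get needs a put */

	return cpu;
}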
/linux-6.12.1/Documentation/bpf/
cpumasks.rst
6 BPF cpumask kfuncs
12 ``struct cpumask`` is a bitmap data structure in the kernel whose indices
21 2. BPF cpumask objects
29 ``struct bpf_cpumask *`` is a cpumask that is allocated by BPF, on behalf of a
32 to a ``struct cpumask *``.
40 .. kernel-doc:: kernel/bpf/cpumask.c
43 .. kernel-doc:: kernel/bpf/cpumask.c
46 .. kernel-doc:: kernel/bpf/cpumask.c
54 struct bpf_cpumask __kptr * cpumask;
71 local.cpumask = NULL;
[all …]
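The documentation excerpt above describes stashing a bpf_cpumask in a map value through a __kptr field. A hedged sketch of that pattern with a hypothetical array map and helper (the authoritative example is in cpumasks.rst itself):

struct cpumask_map_value {
	struct bpf_cpumask __kptr *cpumask;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct cpumask_map_value);
} cpumask_map SEC(".maps");

static int stash_cpumask(void)
{
	struct cpumask_map_value *v;
	struct bpf_cpumask *mask, *old;
	u32 key = 0;

	v = bpf_map_lookup_elem(&cpumask_map, &key);
	if (!v)
		return 1;	/* slot missing (cannot happen for an array map) */

	mask = bpf_cpumask_create();
	if (!mask)
		return 1;	/* allocation failed */

	/* Move ownership of the new mask into the map; drop whatever
	 * was stashed there before. */
	old = bpf_kptr_xchg(&v->cpumask, mask);
	if (old)
		bpf_cpumask_release(old);
	return 0;
}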
/linux-6.12.1/Documentation/translations/zh_CN/core-api/
padata.rst
57 cpumask_var_t cpumask);
60 The parallel cpumask describes which processors will be used to execute jobs submitted to this instance in parallel, and the serial cpumask
61 defines which processors are allowed to be used as the serialization callback processor. cpumask specifies the new cpumask to use
65 and serial_cpumask; either cpumask may be changed by echoing a bitmask into the file
70 Reading one of these files shows the user-supplied cpumask, which may differ from the "usable" cpumask.
72 Padata maintains two pairs of cpumasks internally, the user-supplied cpumasks and the "usable" cpumasks (each pair consists of a
73 parallel and a serial cpumask). The user-supplied cpumasks default to all possible CPUs on instance allocation,
76 it is legal to supply a cpumask that contains offline CPUs. Once an offline CPU in the user-supplied cpumask comes online,
104 somewhere else is messing with the instance's CPU mask, while cb_cpu is not in the serial cpumask, or the parallel or serial cpumasks
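A hedged sketch of the padata_set_cpumask() call the translated text describes; demo_restrict_parallel() and the CPU list are made up, and pinst would come from padata_alloc():

#include <linux/cpumask.h>
#include <linux/padata.h>

static int demo_restrict_parallel(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int ret;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Limit parallel workers to CPUs 0-3; padata keeps its own copy. */
	ret = cpulist_parse("0-3", mask);
	if (!ret)
		ret = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

	free_cpumask_var(mask);
	return ret;
}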
/linux-6.12.1/drivers/opp/
cpu.c
108 void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, in _dev_pm_opp_cpumask_remove_table() argument
114 WARN_ON(cpumask_empty(cpumask)); in _dev_pm_opp_cpumask_remove_table()
116 for_each_cpu(cpu, cpumask) { in _dev_pm_opp_cpumask_remove_table()
139 void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) in dev_pm_opp_cpumask_remove_table() argument
141 _dev_pm_opp_cpumask_remove_table(cpumask, -1); in dev_pm_opp_cpumask_remove_table()
156 const struct cpumask *cpumask) in dev_pm_opp_set_sharing_cpus() argument
167 for_each_cpu(cpu, cpumask) { in dev_pm_opp_set_sharing_cpus()
205 int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) in dev_pm_opp_get_sharing_cpus() argument
220 cpumask_clear(cpumask); in dev_pm_opp_get_sharing_cpus()
225 cpumask_set_cpu(opp_dev->dev->id, cpumask); in dev_pm_opp_get_sharing_cpus()
[all …]
/linux-6.12.1/kernel/
padata.c
54 target_cpu = cpumask_first(pd->cpumask.pcpu); in padata_index_to_cpu()
56 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); in padata_index_to_cpu()
67 int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); in padata_cpu_hash()
191 if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) { in padata_do_parallel()
192 if (cpumask_empty(pd->cpumask.cbcpu)) in padata_do_parallel()
196 cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu); in padata_do_parallel()
198 cpu = cpumask_first(pd->cpumask.cbcpu); in padata_do_parallel()
200 cpu = cpumask_next(cpu, pd->cpumask.cbcpu); in padata_do_parallel()
277 pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false); in padata_find_next()
435 cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu); in padata_setup_cpumasks()
[all …]
stop_machine.c
175 const struct cpumask *active_cpus;
197 notrace void __weak stop_machine_yield(const struct cpumask *cpumask) in stop_machine_yield() argument
208 const struct cpumask *cpumask; in multi_cpu_stop() local
219 cpumask = cpu_online_mask; in multi_cpu_stop()
220 is_active = cpu == cpumask_first(cpumask); in multi_cpu_stop()
222 cpumask = msdata->active_cpus; in multi_cpu_stop()
223 is_active = cpumask_test_cpu(cpu, cpumask); in multi_cpu_stop()
229 stop_machine_yield(cpumask); in multi_cpu_stop()
391 static bool queue_stop_cpus_work(const struct cpumask *cpumask, in queue_stop_cpus_work() argument
407 for_each_cpu(cpu, cpumask) { in queue_stop_cpus_work()
[all …]
/linux-6.12.1/drivers/powercap/
idle_inject.c
89 unsigned long cpumask[]; member
107 for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) { in idle_inject_wakeup()
230 cpumask_pr_args(to_cpumask(ii_dev->cpumask))); in idle_inject_start()
260 cpumask_pr_args(to_cpumask(ii_dev->cpumask))); in idle_inject_stop()
279 for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) { in idle_inject_stop()
331 struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask, in idle_inject_register_full() argument
341 cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask); in idle_inject_register_full()
347 for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) { in idle_inject_register_full()
360 for_each_cpu(cpu_rb, to_cpumask(ii_dev->cpumask)) { in idle_inject_register_full()
383 struct idle_inject_device *idle_inject_register(struct cpumask *cpumask) in idle_inject_register() argument
[all …]
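A hedged sketch of the idle_inject registration flow these hits come from; the mask and duty-cycle values are illustrative:

#include <linux/cpumask.h>
#include <linux/idle_inject.h>

static struct idle_inject_device *demo_start_injection(void)
{
	struct idle_inject_device *ii_dev;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return NULL;
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);

	ii_dev = idle_inject_register(mask);
	free_cpumask_var(mask);		/* idle_inject copies the mask */
	if (!ii_dev)
		return NULL;

	/* 40 ms of normal run time, then 10 ms of forced idle. */
	idle_inject_set_duration(ii_dev, 40000, 10000);
	if (idle_inject_start(ii_dev)) {
		idle_inject_unregister(ii_dev);
		return NULL;
	}
	return ii_dev;
}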
/linux-6.12.1/drivers/cpuidle/
cpuidle-big_little.c
144 struct cpumask *cpumask; in bl_idle_driver_init() local
147 cpumask = kzalloc(cpumask_size(), GFP_KERNEL); in bl_idle_driver_init()
148 if (!cpumask) in bl_idle_driver_init()
153 cpumask_set_cpu(cpu, cpumask); in bl_idle_driver_init()
155 drv->cpumask = cpumask; in bl_idle_driver_init()
227 kfree(bl_idle_big_driver.cpumask); in bl_idle_init()
229 kfree(bl_idle_little_driver.cpumask); in bl_idle_init()
dt_idle_states.c
97 const cpumask_t *cpumask) in idle_state_valid() argument
110 for (cpu = cpumask_next(cpumask_first(cpumask), cpumask); in idle_state_valid()
111 cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { in idle_state_valid()
156 const cpumask_t *cpumask; in dt_init_idle_driver() local
167 cpumask = drv->cpumask ? : cpu_possible_mask; in dt_init_idle_driver()
168 cpu_node = of_cpu_device_node_get(cpumask_first(cpumask)); in dt_init_idle_driver()
186 if (!idle_state_valid(state_node, i, cpumask)) { in dt_init_idle_driver()
driver.c
53 for_each_cpu(cpu, drv->cpumask) { in __cpuidle_unset_driver()
73 for_each_cpu(cpu, drv->cpumask) { in __cpuidle_set_driver()
81 for_each_cpu(cpu, drv->cpumask) in __cpuidle_set_driver()
164 if (!drv->cpumask) in __cpuidle_driver_init()
165 drv->cpumask = (struct cpumask *)cpu_possible_mask; in __cpuidle_driver_init()
233 on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, in __cpuidle_register_driver()
252 on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, in __cpuidle_unregister_driver()
371 if (!drv->cpumask) { in cpuidle_driver_state_disabled()
376 for_each_cpu(cpu, drv->cpumask) { in cpuidle_driver_state_disabled()
/linux-6.12.1/arch/arc/kernel/
smp.c
42 static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask) in arc_get_cpu_map() argument
51 if (cpulist_parse(buf, cpumask)) in arc_get_cpu_map()
63 struct cpumask cpumask; in arc_init_cpu_possible() local
65 if (arc_get_cpu_map("possible-cpus", &cpumask)) { in arc_init_cpu_possible()
69 cpumask_setall(&cpumask); in arc_init_cpu_possible()
72 if (!cpumask_test_cpu(0, &cpumask)) in arc_init_cpu_possible()
75 init_cpu_possible(&cpumask); in arc_init_cpu_possible()
283 static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg) in ipi_send_msg()
298 struct cpumask targets; in smp_send_stop()
309 void arch_send_call_function_ipi_mask(const struct cpumask *mask) in arch_send_call_function_ipi_mask()
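arc_get_cpu_map() above is built on cpulist_parse(); a small hypothetical helper showing the same parse-and-report pattern:

#include <linux/cpumask.h>
#include <linux/printk.h>

static int demo_parse_cpulist(const char *list)	/* e.g. "0-3,6" */
{
	cpumask_var_t mask;
	int ret;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = cpulist_parse(list, mask);	/* readable list -> bitmap */
	if (!ret)
		pr_info("parsed %*pbl (%u CPUs)\n",
			cpumask_pr_args(mask), cpumask_weight(mask));

	free_cpumask_var(mask);
	return ret;
}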
/linux-6.12.1/include/trace/events/
ipi.h
21 TP_PROTO(const struct cpumask *mask, const char *reason),
62 TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback),
64 TP_ARGS(cpumask, callsite, callback),
67 __cpumask(cpumask)
73 __assign_cpumask(cpumask, cpumask_bits(cpumask));
79 __get_cpumask(cpumask), __entry->callsite, __entry->callback)
/linux-6.12.1/arch/x86/kernel/apic/
x2apic_cluster.c
22 static DEFINE_PER_CPU_READ_MOSTLY(struct cpumask *, cluster_masks);
39 __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) in __x2apic_send_IPI_mask()
42 struct cpumask *tmpmsk; in __x2apic_send_IPI_mask()
58 struct cpumask *cmsk = per_cpu(cluster_masks, cpu); in __x2apic_send_IPI_mask()
75 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) in x2apic_send_IPI_mask()
81 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) in x2apic_send_IPI_mask_allbutself()
93 struct cpumask *cmsk = this_cpu_read(cluster_masks); in init_x2apic_ldr()
105 static void prefill_clustermask(struct cpumask *cmsk, unsigned int cpu, u32 cluster) in prefill_clustermask()
110 struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i); in prefill_clustermask()
126 struct cpumask *cmsk = NULL; in alloc_clustermask()
[all …]
/linux-6.12.1/lib/
group_cpus.c
14 static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, in grp_spread_init_one()
17 const struct cpumask *siblmsk; in grp_spread_init_one()
86 const struct cpumask *mask, nodemask_t *nodemsk) in get_nodes_in_cpumask()
132 const struct cpumask *cpu_mask, in alloc_nodes_groups()
134 struct cpumask *nmsk, in alloc_nodes_groups()
251 const struct cpumask *cpu_mask, in __group_cpus_evenly()
252 struct cpumask *nmsk, struct cpumask *masks) in __group_cpus_evenly()
347 struct cpumask *group_cpus_evenly(unsigned int numgrps) in group_cpus_evenly()
353 struct cpumask *masks = NULL; in group_cpus_evenly()
427 struct cpumask *group_cpus_evenly(unsigned int numgrps) in group_cpus_evenly()
[all …]
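A hedged sketch of the group_cpus_evenly() calling convention visible in these hits: the helper returns a kcalloc'd array of numgrps NUMA-aware masks (or NULL) that the caller must free, the way IRQ affinity spreading consumes it:

#include <linux/group_cpus.h>
#include <linux/printk.h>
#include <linux/slab.h>

static void demo_group_cpus(unsigned int numgrps)
{
	struct cpumask *masks;
	unsigned int i;

	masks = group_cpus_evenly(numgrps);
	if (!masks)
		return;

	for (i = 0; i < numgrps; i++)
		pr_info("group %u: %*pbl\n", i, cpumask_pr_args(&masks[i]));

	kfree(masks);
}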
/linux-6.12.1/drivers/md/
dm-ps-io-affinity.c
16 cpumask_var_t cpumask; member
36 free_cpumask_var(pi->cpumask); in ioa_free_path()
66 if (!zalloc_cpumask_var(&pi->cpumask, GFP_KERNEL)) { in ioa_add_path()
72 ret = cpumask_parse(argv[0], pi->cpumask); in ioa_add_path()
79 for_each_cpu(cpu, pi->cpumask) { in ioa_add_path()
105 free_cpumask_var(pi->cpumask); in ioa_add_path()
171 DMEMIT("%*pb ", cpumask_pr_args(pi->cpumask)); in ioa_status()
201 const struct cpumask *cpumask; in ioa_select_path() local
219 cpumask = cpumask_of_node(node); in ioa_select_path()
220 for_each_cpu(i, cpumask) { in ioa_select_path()
/linux-6.12.1/tools/accounting/
procacct.c
84 char cpumask[100+6*MAX_CPUS]; variable
299 strncpy(cpumask, optarg, sizeof(cpumask)); in main()
300 cpumask[sizeof(cpumask) - 1] = '\0'; in main()
314 strncpy(cpumask, "1", sizeof(cpumask)); in main()
315 cpumask[sizeof(cpumask) - 1] = '\0'; in main()
317 printf("cpumask %s maskset %d\n", cpumask, maskset); in main()
342 &cpumask, strlen(cpumask) + 1); in main()
400 &cpumask, strlen(cpumask) + 1); in main()
/linux-6.12.1/arch/powerpc/include/asm/
smp.h
114 static inline struct cpumask *cpu_sibling_mask(int cpu) in cpu_sibling_mask()
119 static inline struct cpumask *cpu_core_mask(int cpu) in cpu_core_mask()
124 static inline struct cpumask *cpu_l2_cache_mask(int cpu) in cpu_l2_cache_mask()
129 static inline struct cpumask *cpu_smallcore_mask(int cpu) in cpu_smallcore_mask()
142 static inline const struct cpumask *cpu_smt_mask(int cpu) in cpu_smt_mask()
194 static inline const struct cpumask *cpu_sibling_mask(int cpu) in cpu_sibling_mask()
199 static inline const struct cpumask *cpu_smallcore_mask(int cpu) in cpu_smallcore_mask()
204 static inline const struct cpumask *cpu_l2_cache_mask(int cpu) in cpu_l2_cache_mask()
256 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
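A one-line sketch using the powerpc-specific topology helpers declared here; demo_smt_width() is hypothetical:

#include <linux/cpumask.h>
#include <asm/smp.h>

/* How many SMT siblings (including "cpu" itself) share its core. */
static unsigned int demo_smt_width(int cpu)
{
	return cpumask_weight(cpu_smt_mask(cpu));
}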
