/linux-6.12.1/fs/squashfs/ |
D | decompressor_multi_percpu.c |
      31  struct squashfs_stream __percpu *percpu;  in squashfs_decompressor_create() local
      34  percpu = alloc_percpu(struct squashfs_stream);  in squashfs_decompressor_create()
      35  if (percpu == NULL)  in squashfs_decompressor_create()
      39  stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_create()
      49  return (void *)(__force unsigned long) percpu;  in squashfs_decompressor_create()
      53  stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_create()
      57  free_percpu(percpu);  in squashfs_decompressor_create()
      63  struct squashfs_stream __percpu *percpu =  in squashfs_decompressor_destroy() local
      70  stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_destroy()
      73  free_percpu(percpu);  in squashfs_decompressor_destroy()
      [all …]
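
The squashfs hits above illustrate the usual lifecycle of dynamically allocated per-CPU data: alloc_percpu() creates one instance per possible CPU, per_cpu_ptr() reaches a particular CPU's copy, and a single free_percpu() releases all of them. Below is a minimal sketch of that pattern, not taken from the squashfs sources; struct my_stream, my_streams_create() and my_streams_destroy() are hypothetical names.

/* Sketch of the alloc_percpu()/per_cpu_ptr()/free_percpu() lifecycle. */
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct my_stream {
        void *workspace;                        /* one private workspace per CPU */
};

static struct my_stream __percpu *my_streams_create(void)
{
        struct my_stream __percpu *percpu;
        struct my_stream *stream;
        int cpu;

        percpu = alloc_percpu(struct my_stream);        /* one copy per possible CPU */
        if (!percpu)
                return NULL;

        for_each_possible_cpu(cpu) {
                stream = per_cpu_ptr(percpu, cpu);      /* address of this CPU's copy */
                stream->workspace = NULL;               /* real code would allocate here */
        }
        return percpu;
}

static void my_streams_destroy(struct my_stream __percpu *percpu)
{
        free_percpu(percpu);                    /* frees every CPU's copy in one call */
}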
|
/linux-6.12.1/kernel/bpf/ |
D | memalloc.c |
      255  static void free_one(void *obj, bool percpu)  in free_one() argument
      257  if (percpu) {  in free_one()
      266  static int free_all(struct llist_node *llnode, bool percpu)  in free_all() argument
      272  free_one(pos, percpu);  in free_all()
      512  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)  in bpf_mem_alloc_init() argument
      519  if (percpu && size == 0)  in bpf_mem_alloc_init()
      523  if (percpu)  in bpf_mem_alloc_init()
      525  ma->percpu = percpu;  in bpf_mem_alloc_init()
      532  if (!percpu)  in bpf_mem_alloc_init()
      590  ma->percpu = true;  in bpf_mem_alloc_percpu_init()
      [all …]
|
D | bpf_lru_list.c |
      501  if (lru->percpu)  in bpf_lru_pop_free()
      558  if (lru->percpu)  in bpf_lru_push_free()
      616  if (lru->percpu)  in bpf_lru_populate()
      651  int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,  in bpf_lru_init() argument
      656  if (percpu) {  in bpf_lru_init()
      686  lru->percpu = percpu;  in bpf_lru_init()
      696  if (lru->percpu)  in bpf_lru_destroy()
|
D | bpf_lru_list.h |
      62  bool percpu;  member
      71  int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
|
/linux-6.12.1/arch/alpha/boot/ |
D | main.c |
      65  struct percpu_struct * percpu;  in pal_init() local
      95  percpu = (struct percpu_struct *)  in pal_init()
      97  rev = percpu->pal_revision = percpu->palcode_avail[2];  in pal_init()
|
D | bootp.c |
      71  struct percpu_struct * percpu;  in pal_init() local
      101  percpu = (struct percpu_struct *)  in pal_init()
      103  rev = percpu->pal_revision = percpu->palcode_avail[2];  in pal_init()
|
D | bootpz.c |
      119  struct percpu_struct * percpu;  in pal_init() local
      149  percpu = (struct percpu_struct *)  in pal_init()
      151  rev = percpu->pal_revision = percpu->palcode_avail[2];  in pal_init()
|
/linux-6.12.1/include/linux/ |
D | bpf_mem_alloc.h |
      15  bool percpu;  member
      29  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
      37  int bpf_mem_alloc_check_size(bool percpu, size_t size);
|
D | spinlock_rt.h |
      11  struct lock_class_key *key, bool percpu);
      14  struct lock_class_key *key, bool percpu)  in __rt_spin_lock_init() argument
|
/linux-6.12.1/Documentation/translations/zh_CN/core-api/ |
D | workqueue.rst |
      595  events                   percpu  0 2 4 6
      596  events_highpri           percpu  1 3 5 7
      597  events_long              percpu  0 2 4 6
      599  events_freezable         percpu  0 2 4 6
      600  events_power_efficient   percpu  0 2 4 6
      601  events_freezable_power_  percpu  0 2 4 6
      602  rcu_gp                   percpu  0 2 4 6
      603  rcu_par_gp               percpu  0 2 4 6
      604  slub_flushwq             percpu  0 2 4 6
|
/linux-6.12.1/net/rds/ |
D | ib_recv.c |
      107  cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);  in rds_ib_recv_alloc_cache()
      108  if (!cache->percpu)  in rds_ib_recv_alloc_cache()
      112  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_recv_alloc_cache()
      130  free_percpu(ic->i_cache_incs.percpu);  in rds_ib_recv_alloc_caches()
      143  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_cache_splice_all_lists()
      166  free_percpu(ic->i_cache_incs.percpu);  in rds_ib_recv_free_caches()
      177  free_percpu(ic->i_cache_frags.percpu);  in rds_ib_recv_free_caches()
      487  chpfirst = __this_cpu_read(cache->percpu->first);  in rds_ib_recv_cache_put()
      493  __this_cpu_write(cache->percpu->first, new_item);  in rds_ib_recv_cache_put()
      494  __this_cpu_inc(cache->percpu->count);  in rds_ib_recv_cache_put()
      [all …]
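
The RDS hits combine the allocate/free pattern with this-CPU accessors (__this_cpu_read()/__this_cpu_write()/__this_cpu_inc()) applied to fields reached through a __percpu pointer. A minimal sketch of that access style follows; struct hit_counters, struct hit_cache and the helper functions are hypothetical names, and the sketch uses the preemption-safe this_cpu_*() forms rather than the __this_cpu_*() forms, which require the caller to have preemption already disabled.

/* Sketch of per-CPU fields behind a __percpu pointer, touched via this_cpu_*(). */
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/types.h>

struct hit_counters {
        u64 hits;
};

struct hit_cache {
        struct hit_counters __percpu *percpu;
};

static int hit_cache_init(struct hit_cache *cache, gfp_t gfp)
{
        cache->percpu = alloc_percpu_gfp(struct hit_counters, gfp);
        return cache->percpu ? 0 : -ENOMEM;
}

static void count_hit(struct hit_cache *cache)
{
        this_cpu_inc(cache->percpu->hits);      /* bump the current CPU's counter */
}

static u64 hits_on_this_cpu(struct hit_cache *cache)
{
        return this_cpu_read(cache->percpu->hits);
}

static void hit_cache_exit(struct hit_cache *cache)
{
        free_percpu(cache->percpu);
}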
|
/linux-6.12.1/tools/testing/selftests/cgroup/ |
D | test_kmem.c |
      356  long current, percpu;  in test_percpu_basic() local
      381  percpu = cg_read_key_long(parent, "memory.stat", "percpu ");  in test_percpu_basic()
      383  if (current > 0 && percpu > 0 && labs(current - percpu) <  in test_percpu_basic()
      388  current, percpu);  in test_percpu_basic()
|
/linux-6.12.1/include/asm-generic/ |
D | vmlinux.lds.h |
      959   *(.data..percpu..decrypted) \
      1037  *(.data..percpu..first) \
      1039  *(.data..percpu..page_aligned) \
      1041  *(.data..percpu..read_mostly) \
      1043  *(.data..percpu) \
      1044  *(.data..percpu..shared_aligned) \
      1074  .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
      1077  . = __per_cpu_load + SIZEOF(.data..percpu);
      1093  .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
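
The linker-script rules above collect the .data..percpu* input sections emitted by the DEFINE_PER_CPU* macros for statically defined per-CPU variables. Below is a minimal sketch of how such variables are declared and used; my_counter, my_hot_flag and the helper functions are hypothetical examples, not taken from the kernel sources.

/* Sketch of statically defined per-CPU variables placed in .data..percpu*. */
#include <linux/percpu.h>
#include <linux/percpu-defs.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, my_counter);       /* goes to .data..percpu */
static DEFINE_PER_CPU_READ_MOSTLY(int, my_hot_flag);    /* goes to .data..percpu..read_mostly */

static void touch_counters(void)
{
        this_cpu_inc(my_counter);               /* operate on this CPU's copy */

        if (this_cpu_read(my_hot_flag))
                this_cpu_write(my_counter, 0);
}

static unsigned long counter_total(void)
{
        unsigned long sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)              /* walk every CPU's copy */
                sum += per_cpu(my_counter, cpu);
        return sum;
}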
|
/linux-6.12.1/drivers/md/ |
D | raid5.c |
      1537  static struct page **to_addr_page(struct raid5_percpu *percpu, int i)  in to_addr_page() argument
      1539  return percpu->scribble + i * percpu->scribble_obj_size;  in to_addr_page()
      1544  struct raid5_percpu *percpu, int i)  in to_addr_conv() argument
      1546  return (void *) (to_addr_page(percpu, i) + sh->disks + 2);  in to_addr_conv()
      1553  to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu)  in to_addr_offs() argument
      1555  return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2);  in to_addr_offs()
      1559  ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)  in ops_run_compute5() argument
      1562  struct page **xor_srcs = to_addr_page(percpu, 0);  in ops_run_compute5()
      1563  unsigned int *off_srcs = to_addr_offs(sh, percpu);  in ops_run_compute5()
      1589  ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));  in ops_run_compute5()
      [all …]
|
/linux-6.12.1/drivers/clocksource/ |
D | timer-qcom.c |
      153  bool percpu)  in msm_timer_init() argument
      159  msm_timer_has_ppi = percpu;  in msm_timer_init()
      167  if (percpu)  in msm_timer_init()
|
/linux-6.12.1/Documentation/trace/coresight/ |
D | coresight-trbe.rst |
      13  Trace Buffer Extension (TRBE) is a percpu hardware which captures in system
      14  memory, CPU traces generated from a corresponding percpu tracing unit. This
|
/linux-6.12.1/arch/sparc/kernel/ |
D | sun4m_irq.c |
      107  bool percpu;  member
      200  if (handler_data->percpu) {  in sun4m_mask_irq()
      219  if (handler_data->percpu) {  in sun4m_unmask_irq()
      278  handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD;  in sun4m_build_device_irq()
|
/linux-6.12.1/arch/x86/kernel/ |
D | vmlinux.lds.S |
      104  percpu PT_LOAD FLAGS(6); /* RW_ */
      231  PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
      232  ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
|
/linux-6.12.1/kernel/sched/ |
D | cpuacct.c |
      213  u64 percpu;  in __cpuacct_percpu_seq_show() local
      217  percpu = cpuacct_cpuusage_read(ca, i, index);  in __cpuacct_percpu_seq_show()
      218  seq_printf(m, "%llu ", (unsigned long long) percpu);  in __cpuacct_percpu_seq_show()
|
/linux-6.12.1/arch/arm64/kvm/hyp/nvhe/ |
D | hyp.lds.S |
      25  BEGIN_HYP_SECTION(.data..percpu)
|
/linux-6.12.1/Documentation/translations/zh_CN/dev-tools/ |
D | kmemleak.rst |
      140  - ``kmemleak_alloc_percpu`` - notify of a percpu memory block allocation
      144  - ``kmemleak_free_percpu`` - notify of a percpu memory block freeing
|
/linux-6.12.1/Documentation/RCU/ |
D | rcuref.rst |
      8   Please note that the percpu-ref feature is likely your first
      10  include/linux/percpu-refcount.h for more information. However, in
      11  those unusual cases where percpu-ref would consume too much memory,
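
The rcuref.rst snippet points readers at percpu-ref (include/linux/percpu-refcount.h) as the usual first choice for reference counting. Below is a minimal sketch of that API's common lifecycle, under the assumption of a simple kmalloc'd object; struct my_object and its helper functions are hypothetical names.

/* Sketch of the percpu_ref init/get/put/kill lifecycle. */
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_object {
        struct percpu_ref ref;
};

static void my_object_release(struct percpu_ref *ref)
{
        /* runs once the last reference is dropped after percpu_ref_kill() */
        struct my_object *obj = container_of(ref, struct my_object, ref);

        percpu_ref_exit(&obj->ref);     /* free the per-CPU counter storage */
        kfree(obj);
}

static struct my_object *my_object_create(void)
{
        struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        if (percpu_ref_init(&obj->ref, my_object_release, 0, GFP_KERNEL)) {
                kfree(obj);
                return NULL;
        }
        return obj;
}

static void my_object_use(struct my_object *obj)
{
        percpu_ref_get(&obj->ref);      /* per-CPU counter in the common case */
        /* ... use obj ... */
        percpu_ref_put(&obj->ref);
}

static void my_object_shutdown(struct my_object *obj)
{
        /* switch to atomic mode and drop the initial reference;
         * my_object_release() runs when the count reaches zero.
         */
        percpu_ref_kill(&obj->ref);
}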
|
/linux-6.12.1/Documentation/locking/ |
D | index.rst |
      24  percpu-rw-semaphore
|
/linux-6.12.1/kernel/locking/ |
D | spinlock_rt.c |
      136  struct lock_class_key *key, bool percpu)  in __rt_spin_lock_init() argument
      138  u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;  in __rt_spin_lock_init()
|
/linux-6.12.1/arch/arm64/kernel/ |
D | vmlinux.lds.S |
      28  HYP_SECTION_NAME(.data..percpu) : { \
      29  *(HYP_SECTION_NAME(.data..percpu)) \
|