Lines Matching +full:nr +full:- +full:outputs
1 // SPDX-License-Identifier: GPL-2.0
63 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
66 [0 ... NR_CPUS-1] = CPU_MASK_NONE };
69 [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
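The three matched initializers above rely on GCC's designated range initializer extension to clear every cpumask slot at compile time. A minimal standalone illustration of the same idiom (array name and size are made up for the example, not the kernel's declarations):

	#define MY_NR_CPUS 64

	/* GCC range designator: elements 0 through MY_NR_CPUS-1 are all
	 * explicitly initialized to zero at compile time.
	 */
	static unsigned long my_cpu_masks[MY_NR_CPUS] = {
		[0 ... MY_NR_CPUS - 1] = 0UL
	};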
127 current_thread_info()->new_child = 0; in smp_callin()
131 current->active_mm = &init_mm; in smp_callin()
156 * initiates the synchronization instead of the slave. -DaveM
177 t0 = tick_ops->get_tick(); in get_delta()
184 t1 = tick_ops->get_tick(); in get_delta()
186 if (t1 - t0 < best_t1 - best_t0) in get_delta()
190 *rt = best_t1 - best_t0; in get_delta()
191 *master = best_tm - best_t0; in get_delta()
197 return tcenter - best_tm; in get_delta()
227 adjust_latency += -delta; in smp_synchronize_tick_client()
228 adj = -delta + adjust_latency/4; in smp_synchronize_tick_client()
230 adj = -delta; in smp_synchronize_tick_client()
232 tick_ops->add_tick(adj); in smp_synchronize_tick_client()
280 go[SLAVE] = tick_ops->get_tick(); in smp_synchronize_one_tick()
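The get_delta() fragments above follow the usual round-trip offset estimate: the slave reads its own tick before (t0) and after (t1) sampling the master's tick, keeps the attempt with the smallest round trip, and treats the midpoint of t0..t1 as the local time at which the master's value was taken; smp_synchronize_tick_client() then applies the negated delta through tick_ops->add_tick(). A minimal sketch of the midpoint calculation, with hypothetical names and without the retry loop or the master/slave handshake:

	/* Estimate the local-vs-master tick offset from one probe.
	 * t0/t1 are local tick reads bracketing the read of the master
	 * tick tm; the midpoint of [t0, t1] approximates the local time
	 * at which tm was sampled.
	 */
	static long estimate_tick_delta(unsigned long t0, unsigned long t1,
					unsigned long tm)
	{
		/* midpoint, computed without overflowing unsigned long */
		unsigned long tcenter = t0 / 2 + t1 / 2 + (t0 & t1 & 1UL);

		return (long)(tcenter - tm);	/* > 0: local tick runs ahead */
	}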
302 num_kernel_image_mappings - 1), in ldom_startcpu_cpuid()
311 hdesc->cpu = cpu; in ldom_startcpu_cpuid()
312 hdesc->num_mappings = num_kernel_image_mappings; in ldom_startcpu_cpuid()
316 hdesc->fault_info_va = (unsigned long) &tb->fault_info; in ldom_startcpu_cpuid()
317 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); in ldom_startcpu_cpuid()
319 hdesc->thread_reg = thread_reg; in ldom_startcpu_cpuid()
324 for (i = 0; i < hdesc->num_mappings; i++) { in ldom_startcpu_cpuid()
325 hdesc->maps[i].vaddr = tte_vaddr; in ldom_startcpu_cpuid()
326 hdesc->maps[i].tte = tte_data; in ldom_startcpu_cpuid()
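The ldom_startcpu_cpuid() fragments fill in a hypervisor trampoline descriptor for the new cpu: its cpu number, the number of kernel image mappings, the virtual and real addresses of the trap block's fault info, the initial thread register, and one vaddr/TTE pair per kernel image mapping. A rough, illustrative reconstruction of that descriptor's shape, inferred only from the matched assignments (not taken from the real header):

	/* Illustrative only: layout guessed from the assignments above. */
	struct hv_tramp_descr_sketch {
		unsigned long cpu;		/* cpu being started */
		unsigned long num_mappings;	/* entries in maps[] below */
		unsigned long fault_info_va;	/* VA of the trap block fault info */
		unsigned long fault_info_pa;	/* RA of the same structure */
		unsigned long thread_reg;	/* initial thread pointer */
		struct {
			unsigned long vaddr;	/* kernel virtual address */
			unsigned long tte;	/* TTE data mapping that range */
		} maps[];			/* num_mappings entries */
	};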
345 * 32-bits (I think) so to be safe we have it read the pointer
346 * contained here so we work on >4GB machines. -DaveM
374 prom_startcpu(dp->phandle, entry, cookie); in smp_boot_one_cpu()
387 ret = -ENODEV; in smp_boot_one_cpu()
415 * ADDR 0x20) for the dummy read. -DaveM in spitfire_xcall_helper()
447 stuck -= 1; in spitfire_xcall_helper()
470 cpu_list = __va(tb->cpu_list_pa); in spitfire_xcall_deliver()
471 mondo = __va(tb->cpu_mondo_block_pa); in spitfire_xcall_deliver()
479 /* Cheetah now allows sending the whole 64 bytes of data in the interrupt
489 cpu_list = __va(tb->cpu_list_pa); in cheetah_xcall_deliver()
490 mondo = __va(tb->cpu_mondo_block_pa); in cheetah_xcall_deliver()
493 * busy/nack fields hard-coded by ITID number for this Ultra-III in cheetah_xcall_deliver()
512 : /* no outputs */ in cheetah_xcall_deliver()
523 u64 target, nr; in cheetah_xcall_deliver() local
525 nr = cpu_list[i]; in cheetah_xcall_deliver()
526 if (nr == 0xffff) in cheetah_xcall_deliver()
529 target = (nr << 14) | 0x70; in cheetah_xcall_deliver()
531 busy_mask |= (0x1UL << (nr * 2)); in cheetah_xcall_deliver()
540 : /* no outputs */ in cheetah_xcall_deliver()
578 if (!--stuck) in cheetah_xcall_deliver()
603 u64 check_mask, nr; in cheetah_xcall_deliver() local
605 nr = cpu_list[i]; in cheetah_xcall_deliver()
606 if (nr == 0xffff) in cheetah_xcall_deliver()
610 check_mask = (0x2UL << (2*nr)); in cheetah_xcall_deliver()
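In the cheetah_xcall_deliver() lines above, each target CPU appears to own a two-bit field in the dispatch status word: the code accumulates bit 2*nr in busy_mask (dispatch still in progress) and later tests bit 2*nr+1 via check_mask (target nacked the mondo), while the per-cpu interrupt dispatch data is addressed at (nr << 14) | 0x70. A small sketch of that bit encoding, with hypothetical helper names:

	/* Hypothetical helpers spelling out the two-bit-per-cpu encoding. */
	static inline unsigned long cheetah_busy_bit(unsigned long nr)
	{
		return 0x1UL << (nr * 2);	/* "dispatch in progress" bit */
	}

	static inline unsigned long cheetah_nack_bit(unsigned long nr)
	{
		return 0x2UL << (nr * 2);	/* "target nacked" bit */
	}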
631 /* Multi-cpu list version.
634 * Sometimes not all cpus receive the mondo, requiring us to re-send
640 * Here two in-between mondo check wait times are defined: 2 usec for
658 cpu_list = __va(tb->cpu_list_pa); in hypervisor_xcall_deliver()
670 tb->cpu_list_pa, in hypervisor_xcall_deliver()
671 tb->cpu_mondo_block_pa); in hypervisor_xcall_deliver()
677 /* If the error is not one of these non-fatal errors, panic */ in hypervisor_xcall_deliver()
687 * Re-pack cpu_list with the cpus that remain to be retried at the in hypervisor_xcall_deliver()
688 * front - this simplifies tracking the truly stalled cpus. in hypervisor_xcall_deliver()
764 this_cpu, ecpuerror_id - 1); in hypervisor_xcall_deliver()
767 this_cpu, enocpu_id - 1); in hypervisor_xcall_deliver()
774 this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa); in hypervisor_xcall_deliver()
778 /* some cpus are non-responsive to the cpu mondo */ in hypervisor_xcall_deliver()
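The hypervisor_xcall_deliver() comments describe retrying the cpu-mondo hypercall and compacting cpu_list so that the CPUs still awaiting delivery sit at the front. A minimal sketch of that re-packing step, under the assumption (suggested by the 0xffff sentinel elsewhere in this listing) that already-handled entries are marked 0xffff; the real loop additionally separates fatal from non-fatal per-cpu errors:

	/* Move entries still pending retry to the front of cpu_list and
	 * return how many remain; 0xffff marks an already-handled cpu.
	 */
	static int repack_pending_cpus(unsigned short *cpu_list, int cnt)
	{
		int i, pending = 0;

		for (i = 0; i < cnt; i++) {
			if (cpu_list[i] != 0xffff)
				cpu_list[pending++] = cpu_list[i];
		}
		return pending;
	}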
809 mondo = __va(tb->cpu_mondo_block_pa); in xcall_deliver()
815 cpu_list = __va(tb->cpu_list_pa); in xcall_deliver()
892 /* It is not valid to test "current->active_mm == mm" here. in tsb_sync()
898 if (tp->pgd_paddr == __pa(mm->pgd)) in tsb_sync()
926 unsigned int i, nr = folio_nr_pages(folio); in __local_flush_dcache_folio() local
929 for (i = 0; i < nr; i++) in __local_flush_dcache_folio()
937 for (i = 0; i < nr; i++) in __local_flush_dcache_folio()
972 unsigned int i, nr = folio_nr_pages(folio); in smp_flush_dcache_folio_impl() local
974 for (i = 0; i < nr; i++) { in smp_flush_dcache_folio_impl()
1013 unsigned int i, nr = folio_nr_pages(folio); in flush_dcache_folio_all() local
1015 for (i = 0; i < nr; i++) { in flush_dcache_folio_all()
1055 * mm->cpu_vm_mask is a bit mask of which cpus an address
1060 /* This currently is only used by the hugetlb arch pre-fault
1061 * hook on UltraSPARC-III+ and later when changing the pagesize
1066 u32 ctx = CTX_HWBITS(mm->context); in smp_flush_tlb_mm()
1081 unsigned long nr; member
1089 __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); in tlb_pending_func()
1092 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) in smp_flush_tlb_pending() argument
1094 u32 ctx = CTX_HWBITS(mm->context); in smp_flush_tlb_pending()
1100 info.nr = nr; in smp_flush_tlb_pending()
1106 __flush_tlb_pending(ctx, nr, vaddrs); in smp_flush_tlb_pending()
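The smp_flush_tlb_pending() fragments show a common cross-call pattern: the arguments (context, count, vaddr array) are packed into a small struct, a helper such as tlb_pending_func() runs on the other CPUs to unpack them and flush, and the same flush is then done locally. A minimal sketch of that pattern (the struct and helper mirror the names in the listing, but this is an illustration, not the kernel's exact code, which targets only the cpus in mm_cpumask()):

	struct tlb_pending_info {
		unsigned long ctx;
		unsigned long nr;
		unsigned long *vaddrs;
	};

	/* Runs on the remote cpus: unpack the arguments and flush. */
	static void tlb_pending_func_sketch(void *info)
	{
		struct tlb_pending_info *t = info;

		__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
	}

	/* Caller side: broadcast the struct, then flush locally too,
	 * since the broadcast does not run on the calling cpu.
	 */
	static void flush_tlb_pending_everywhere_sketch(unsigned long ctx,
							unsigned long nr,
							unsigned long *vaddrs)
	{
		struct tlb_pending_info info = {
			.ctx = ctx, .nr = nr, .vaddrs = vaddrs,
		};

		smp_call_function(tlb_pending_func_sketch, &info, 1);
		__flush_tlb_pending(ctx, nr, vaddrs);
	}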
1113 unsigned long context = CTX_HWBITS(mm->context); in smp_flush_tlb_page()
1256 if (cpu_data(i).proc_id == -1) { in smp_fill_in_sib_core_maps()
1278 ret = -ENODEV; in __cpu_up()
1302 tb->cpu_mondo_pa, 0); in cpu_play_dead()
1304 tb->dev_mondo_pa, 0); in cpu_play_dead()
1306 tb->resum_mondo_pa, 0); in cpu_play_dead()
1308 tb->nonresum_mondo_pa, 0); in cpu_play_dead()
1342 c->core_id = 0; in __cpu_disable()
1343 c->proc_id = -1; in __cpu_disable()
1384 } while (--limit > 0); in __cpu_die()
1449 * - cpu poke not supported in arch_smp_send_reschedule()
1450 * - cpu not idle in arch_smp_send_reschedule()
1451 * - send_cpu_poke() returns with error in arch_smp_send_reschedule()
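The arch_smp_send_reschedule() lines list the cases where the cheaper cpu poke cannot be used and the code falls back to a mondo: poke not supported, target cpu not idle, or send_cpu_poke() failing. A compact sketch of that decision; cpu_poke_supported, idle_cpu() and send_cpu_poke() stand in for the real checks, and send_resched_mondo() is a hypothetical name for the mondo path:

	static void resched_ipi_sketch(int cpu)
	{
		if (cpu_poke_supported && idle_cpu(cpu) && send_cpu_poke(cpu))
			return;			/* poke delivered, done */

		send_resched_mondo(cpu);	/* fall back to the cpu mondo */
	}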
1538 int rc = -EINVAL; in setup_per_cpu_areas()
1556 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; in setup_per_cpu_areas()