Lines Matching +full:- +full:affinity
(drivers/infiniband/hw/hfi1/affinity.c, Linux kernel hfi1 InfiniBand driver; each entry shows the file's own line number, the matched source line, and the enclosing function.)

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright(c) 2015 - 2020 Intel Corporation.
12 #include "affinity.h"
35 cpumask_clear(&set->mask); in init_cpu_mask_set()
36 cpumask_clear(&set->used); in init_cpu_mask_set()
37 set->gen = 0; in init_cpu_mask_set()
43 if (cpumask_equal(&set->mask, &set->used)) { in _cpu_mask_set_gen_inc()
48 set->gen++; in _cpu_mask_set_gen_inc()
49 cpumask_clear(&set->used); in _cpu_mask_set_gen_inc()
55 if (cpumask_empty(&set->used) && set->gen) { in _cpu_mask_set_gen_dec()
56 set->gen--; in _cpu_mask_set_gen_dec()
57 cpumask_copy(&set->used, &set->mask); in _cpu_mask_set_gen_dec()
67 return -EINVAL; in cpu_mask_set_get_first()
72 cpumask_andnot(diff, &set->mask, &set->used); in cpu_mask_set_get_first()
76 cpu = -EINVAL; in cpu_mask_set_get_first()
78 cpumask_set_cpu(cpu, &set->used); in cpu_mask_set_get_first()
88 cpumask_clear_cpu(cpu, &set->used); in cpu_mask_set_put()
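
The cpu_mask_set helpers matched above (init_cpu_mask_set through cpu_mask_set_put) implement a round-robin allocator: a `mask` of eligible CPUs, a `used` mask of CPUs already handed out, and a generation counter that wraps the set once every CPU has been used. Below is a minimal userspace C model of that scheme; the type and function names, and the 64-CPU bitmask, are illustrative assumptions, not the driver's code.

```c
/* Minimal userspace model of the hfi1 cpu_mask_set idea:
 * hand out CPUs from `mask`, tracking them in `used`; when every
 * CPU has been used, bump the generation and start over. */
#include <stdint.h>
#include <stdio.h>

struct cpu_mask_set {
	uint64_t mask;   /* eligible CPUs, one bit per CPU        */
	uint64_t used;   /* CPUs already handed out this round    */
	uint32_t gen;    /* bumped each time the set wraps around */
};

static void mask_set_gen_inc(struct cpu_mask_set *set)
{
	if (set->used == set->mask) {	/* everything handed out: wrap */
		set->gen++;
		set->used = 0;
	}
}

static int mask_set_get_first(struct cpu_mask_set *set)
{
	uint64_t diff;
	int cpu;

	mask_set_gen_inc(set);
	diff = set->mask & ~set->used;	/* CPUs not yet used this round */
	if (!diff)
		return -1;
	cpu = __builtin_ctzll(diff);	/* lowest-numbered free CPU */
	set->used |= 1ULL << cpu;
	return cpu;
}

static void mask_set_put(struct cpu_mask_set *set, int cpu)
{
	set->used &= ~(1ULL << cpu);
}

int main(void)
{
	struct cpu_mask_set set = { .mask = 0xF };	/* CPUs 0-3 */

	for (int i = 0; i < 6; i++) {
		int cpu = mask_set_get_first(&set);
		printf("got cpu %d (gen %u)\n", cpu, set.gen);
	}
	mask_set_put(&set, 1);
	return 0;
}
```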
92 /* Initialize non-HT cpu cores mask */
145 * The real cpu mask is part of the affinity struct but it has to be in node_affinity_init()
154 return -ENOMEM; in node_affinity_init()
156 while (ids->vendor) { in node_affinity_init()
158 while ((dev = pci_get_device(ids->vendor, ids->device, dev))) { in node_affinity_init()
159 node = pcibus_to_node(dev->bus); in node_affinity_init()
187 free_percpu(entry->comp_vect_affinity); in node_affinity_destroy()
214 entry->node = node; in node_affinity_allocate()
215 entry->comp_vect_affinity = alloc_percpu(u16); in node_affinity_allocate()
216 INIT_LIST_HEAD(&entry->list); in node_affinity_allocate()
227 list_add_tail(&entry->list, &node_affinity.list); in node_affinity_add_tail()
236 if (entry->node == node) in node_affinity_lookup()
252 ret_cpu = -EINVAL; in per_cpu_affinity_get()
257 ret_cpu = -EINVAL; in per_cpu_affinity_get()
263 ret_cpu = -EINVAL; in per_cpu_affinity_get()
292 return -EINVAL; in per_cpu_affinity_put_max()
295 return -EINVAL; in per_cpu_affinity_put_max()
299 return -EINVAL; in per_cpu_affinity_put_max()
311 *per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1; in per_cpu_affinity_put_max()
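
per_cpu_affinity_get() and per_cpu_affinity_put_max(), matched above, balance load with a per-CPU u16 reference count: "get" returns the least-used CPU in a mask and bumps its count, "put_max" finds the most-used CPU and decrements it. A rough userspace sketch follows; plain array counters stand in for the kernel's alloc_percpu(u16), and the names are illustrative.

```c
/* Userspace sketch of least-used/most-used CPU selection driven by
 * per-CPU use counters, as in per_cpu_affinity_get()/put_max(). */
#include <stdio.h>

#define NCPUS 8

static int affinity_get(const int *allowed, unsigned short *cnt)
{
	int best = -1;

	for (int cpu = 0; cpu < NCPUS; cpu++) {
		if (!allowed[cpu])
			continue;
		if (best < 0 || cnt[cpu] < cnt[best])
			best = cpu;
	}
	if (best >= 0)
		cnt[best]++;		/* one more user on this CPU */
	return best;
}

static int affinity_put_max(const int *allowed, unsigned short *cnt)
{
	int worst = -1;

	for (int cpu = 0; cpu < NCPUS; cpu++) {
		if (!allowed[cpu])
			continue;
		if (worst < 0 || cnt[cpu] > cnt[worst])
			worst = cpu;
	}
	if (worst >= 0 && cnt[worst])
		cnt[worst]--;		/* drop one user from the busiest CPU */
	return worst;
}

int main(void)
{
	int allowed[NCPUS] = { 1, 1, 1, 0, 0, 0, 0, 0 };   /* CPUs 0-2 */
	unsigned short cnt[NCPUS] = { 0 };

	for (int i = 0; i < 4; i++)
		printf("get -> cpu %d\n", affinity_get(allowed, cnt));
	printf("put_max -> cpu %d\n", affinity_put_max(allowed, cnt));
	return 0;
}
```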
317 * Non-interrupt CPUs are used first, then interrupt CPUs.
327 struct cpu_mask_set *set = dd->comp_vect; in _dev_comp_vect_cpu_get()
331 cpu = -1; in _dev_comp_vect_cpu_get()
336 cpu = -1; in _dev_comp_vect_cpu_get()
342 cpumask_andnot(available_cpus, &set->mask, &set->used); in _dev_comp_vect_cpu_get()
346 &entry->def_intr.used); in _dev_comp_vect_cpu_get()
348 /* If there are non-interrupt CPUs available, use them first */ in _dev_comp_vect_cpu_get()
355 cpu = -1; in _dev_comp_vect_cpu_get()
358 cpumask_set_cpu(cpu, &set->used); in _dev_comp_vect_cpu_get()
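
The comment at line 317/348 spells out the policy _dev_comp_vect_cpu_get() follows: prefer CPUs that do not already service device interrupts, and fall back to interrupt CPUs only when nothing else is left. A small userspace model of that preference, with bitmasks standing in for struct cpumask and invented names:

```c
/* Sketch of the "non-interrupt CPUs first" completion-vector policy. */
#include <stdint.h>
#include <stdio.h>

static int comp_vect_cpu_get(uint64_t mask, uint64_t *used, uint64_t intr_cpus)
{
	uint64_t available = mask & ~*used;          /* not yet handed out   */
	uint64_t non_intr  = available & ~intr_cpus; /* preferred candidates */
	uint64_t pick_from = non_intr ? non_intr : available;
	int cpu;

	if (!pick_from)
		return -1;                           /* nothing left at all  */
	cpu = __builtin_ctzll(pick_from);
	*used |= 1ULL << cpu;
	return cpu;
}

int main(void)
{
	uint64_t used = 0;

	/* CPUs 0-5 eligible; CPUs 0 and 1 already service interrupts. */
	for (int i = 0; i < 7; i++)
		printf("comp vect %d -> cpu %d\n", i,
		       comp_vect_cpu_get(0x3F, &used, 0x03));
	return 0;
}
```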
366 struct cpu_mask_set *set = dd->comp_vect; in _dev_comp_vect_cpu_put()
379 if (!dd->comp_vect_mappings) in _dev_comp_vect_mappings_destroy()
382 for (i = 0; i < dd->comp_vect_possible_cpus; i++) { in _dev_comp_vect_mappings_destroy()
383 cpu = dd->comp_vect_mappings[i]; in _dev_comp_vect_mappings_destroy()
385 dd->comp_vect_mappings[i] = -1; in _dev_comp_vect_mappings_destroy()
386 hfi1_cdbg(AFFINITY, in _dev_comp_vect_mappings_destroy()
388 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i); in _dev_comp_vect_mappings_destroy()
391 kfree(dd->comp_vect_mappings); in _dev_comp_vect_mappings_destroy()
392 dd->comp_vect_mappings = NULL; in _dev_comp_vect_mappings_destroy()
410 return -ENOMEM; in _dev_comp_vect_mappings_create()
414 return -ENOMEM; in _dev_comp_vect_mappings_create()
417 dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus, in _dev_comp_vect_mappings_create()
418 sizeof(*dd->comp_vect_mappings), in _dev_comp_vect_mappings_create()
420 if (!dd->comp_vect_mappings) { in _dev_comp_vect_mappings_create()
421 ret = -ENOMEM; in _dev_comp_vect_mappings_create()
424 for (i = 0; i < dd->comp_vect_possible_cpus; i++) in _dev_comp_vect_mappings_create()
425 dd->comp_vect_mappings[i] = -1; in _dev_comp_vect_mappings_create()
427 for (i = 0; i < dd->comp_vect_possible_cpus; i++) { in _dev_comp_vect_mappings_create()
431 ret = -EINVAL; in _dev_comp_vect_mappings_create()
435 dd->comp_vect_mappings[i] = cpu; in _dev_comp_vect_mappings_create()
436 hfi1_cdbg(AFFINITY, in _dev_comp_vect_mappings_create()
437 "[%s] Completion Vector %d -> CPU %d", in _dev_comp_vect_mappings_create()
438 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu); in _dev_comp_vect_mappings_create()
459 entry = node_affinity_lookup(dd->node); in hfi1_comp_vectors_set_up()
461 ret = -EINVAL; in hfi1_comp_vectors_set_up()
481 if (!dd->comp_vect_mappings) in hfi1_comp_vect_mappings_lookup()
482 return -EINVAL; in hfi1_comp_vect_mappings_lookup()
483 if (comp_vect >= dd->comp_vect_possible_cpus) in hfi1_comp_vect_mappings_lookup()
484 return -EINVAL; in hfi1_comp_vect_mappings_lookup()
486 return dd->comp_vect_mappings[comp_vect]; in hfi1_comp_vect_mappings_lookup()
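
The mapping code matched above builds a per-device table sized to comp_vect_possible_cpus, pre-filled with -1, populated once at setup, and read back through a bounds-checked lookup. The sketch below models that table in userspace; the struct and function names are illustrative, not the driver's.

```c
/* Userspace model of the completion-vector -> CPU mapping table. */
#include <stdio.h>
#include <stdlib.h>

struct comp_vect_map {
	int *cpu;     /* comp_vect index -> CPU, or -1 if unmapped */
	int  n;       /* number of completion vectors              */
};

static int map_create(struct comp_vect_map *m, int n, const int *chosen_cpus)
{
	m->cpu = malloc(n * sizeof(*m->cpu));
	if (!m->cpu)
		return -1;
	m->n = n;
	for (int i = 0; i < n; i++)
		m->cpu[i] = -1;             /* start unmapped, like the driver */
	for (int i = 0; i < n; i++)
		m->cpu[i] = chosen_cpus[i]; /* normally picked per-vector      */
	return 0;
}

static int map_lookup(const struct comp_vect_map *m, int comp_vect)
{
	if (!m->cpu || comp_vect < 0 || comp_vect >= m->n)
		return -1;                  /* mirrors the -EINVAL checks */
	return m->cpu[comp_vect];
}

int main(void)
{
	int cpus[] = { 2, 3, 4, 5 };
	struct comp_vect_map m;

	if (map_create(&m, 4, cpus))
		return 1;
	printf("vector 1 -> cpu %d\n", map_lookup(&m, 1));
	printf("vector 9 -> cpu %d\n", map_lookup(&m, 9));
	free(m.cpu);
	return 0;
}
```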
490 * It assumes dd->comp_vect_possible_cpus is available.
499 struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask; in _dev_comp_vect_cpu_mask_init()
509 if (cpumask_weight(&entry->comp_vect_mask) == 1) { in _dev_comp_vect_cpu_mask_init()
512 … "Number of kernel receive queues is too large for completion vector affinity to be effective\n"); in _dev_comp_vect_cpu_mask_init()
515 cpumask_weight(&entry->comp_vect_mask) / in _dev_comp_vect_cpu_mask_init()
516 hfi1_per_node_cntr[dd->node]; in _dev_comp_vect_cpu_mask_init()
524 cpumask_weight(&entry->comp_vect_mask) % in _dev_comp_vect_cpu_mask_init()
525 hfi1_per_node_cntr[dd->node] != 0) in _dev_comp_vect_cpu_mask_init()
529 dd->comp_vect_possible_cpus = possible_cpus_comp_vect; in _dev_comp_vect_cpu_mask_init()
532 for (i = 0; i < dd->comp_vect_possible_cpus; i++) { in _dev_comp_vect_cpu_mask_init()
533 curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask, in _dev_comp_vect_cpu_mask_init()
534 entry->comp_vect_affinity); in _dev_comp_vect_cpu_mask_init()
541 hfi1_cdbg(AFFINITY, in _dev_comp_vect_cpu_mask_init()
542 "[%s] Completion vector affinity CPU set(s) %*pbl", in _dev_comp_vect_cpu_mask_init()
543 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), in _dev_comp_vect_cpu_mask_init()
550 per_cpu_affinity_put_max(&entry->comp_vect_mask, in _dev_comp_vect_cpu_mask_init()
551 entry->comp_vect_affinity); in _dev_comp_vect_cpu_mask_init()
557 * It assumes dd->comp_vect_possible_cpus is available.
566 if (!dd->comp_vect_possible_cpus) in _dev_comp_vect_cpu_mask_clean_up()
569 for (i = 0; i < dd->comp_vect_possible_cpus; i++) { in _dev_comp_vect_cpu_mask_clean_up()
570 cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask, in _dev_comp_vect_cpu_mask_clean_up()
571 entry->comp_vect_affinity); in _dev_comp_vect_cpu_mask_clean_up()
574 cpumask_clear_cpu(cpu, &dd->comp_vect->mask); in _dev_comp_vect_cpu_mask_clean_up()
577 dd->comp_vect_possible_cpus = 0; in _dev_comp_vect_cpu_mask_clean_up()
581 * Interrupt affinity.
583 * non-rcv avail gets a default mask that
598 local_mask = cpumask_of_node(dd->node); in hfi1_dev_affinity_init()
603 entry = node_affinity_lookup(dd->node); in hfi1_dev_affinity_init()
606 * If this is the first time this NUMA node's affinity is used, in hfi1_dev_affinity_init()
607 * create an entry in the global affinity structure and initialize it. in hfi1_dev_affinity_init()
610 entry = node_affinity_allocate(dd->node); in hfi1_dev_affinity_init()
613 "Unable to allocate global affinity node\n"); in hfi1_dev_affinity_init()
614 ret = -ENOMEM; in hfi1_dev_affinity_init()
619 init_cpu_mask_set(&entry->def_intr); in hfi1_dev_affinity_init()
620 init_cpu_mask_set(&entry->rcv_intr); in hfi1_dev_affinity_init()
621 cpumask_clear(&entry->comp_vect_mask); in hfi1_dev_affinity_init()
622 cpumask_clear(&entry->general_intr_mask); in hfi1_dev_affinity_init()
624 cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask, in hfi1_dev_affinity_init()
628 possible = cpumask_weight(&entry->def_intr.mask); in hfi1_dev_affinity_init()
629 curr_cpu = cpumask_first(&entry->def_intr.mask); in hfi1_dev_affinity_init()
633 cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask); in hfi1_dev_affinity_init()
634 cpumask_set_cpu(curr_cpu, &entry->general_intr_mask); in hfi1_dev_affinity_init()
641 cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask); in hfi1_dev_affinity_init()
642 cpumask_set_cpu(curr_cpu, &entry->general_intr_mask); in hfi1_dev_affinity_init()
644 &entry->def_intr.mask); in hfi1_dev_affinity_init()
651 i < (dd->n_krcv_queues - 1) * in hfi1_dev_affinity_init()
652 hfi1_per_node_cntr[dd->node]; in hfi1_dev_affinity_init()
655 &entry->def_intr.mask); in hfi1_dev_affinity_init()
657 &entry->rcv_intr.mask); in hfi1_dev_affinity_init()
659 &entry->def_intr.mask); in hfi1_dev_affinity_init()
669 if (cpumask_empty(&entry->def_intr.mask)) in hfi1_dev_affinity_init()
670 cpumask_copy(&entry->def_intr.mask, in hfi1_dev_affinity_init()
671 &entry->general_intr_mask); in hfi1_dev_affinity_init()
675 cpumask_and(&entry->comp_vect_mask, in hfi1_dev_affinity_init()
677 cpumask_andnot(&entry->comp_vect_mask, in hfi1_dev_affinity_init()
678 &entry->comp_vect_mask, in hfi1_dev_affinity_init()
679 &entry->rcv_intr.mask); in hfi1_dev_affinity_init()
680 cpumask_andnot(&entry->comp_vect_mask, in hfi1_dev_affinity_init()
681 &entry->comp_vect_mask, in hfi1_dev_affinity_init()
682 &entry->general_intr_mask); in hfi1_dev_affinity_init()
689 if (cpumask_empty(&entry->comp_vect_mask)) in hfi1_dev_affinity_init()
690 cpumask_copy(&entry->comp_vect_mask, in hfi1_dev_affinity_init()
691 &entry->general_intr_mask); in hfi1_dev_affinity_init()
701 dd->affinity_entry = entry; in hfi1_dev_affinity_init()
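
hfi1_dev_affinity_init(), matched above, carves the NUMA node's non-HT CPUs into sets: the first CPU for the general interrupt, a few CPUs for kernel receive interrupts, the remainder for SDMA (def_intr), with completion vectors taking node CPUs not claimed by the receive or general masks. The following is only a loose userspace sketch of that set arithmetic; the counts are made-up example values and several driver details (per-node counters, fallback paths) are omitted.

```c
/* Rough model of partitioning a node's CPUs into general / rcv_intr /
 * def_intr / comp_vect sets, as hfi1_dev_affinity_init() does. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t node_cpus = 0xFF;      /* CPUs 0-7 on this node          */
	int n_rcv_queues = 3;           /* example: 3 kernel rcv queues   */

	uint64_t def_intr = node_cpus;  /* start with everything          */

	/* First CPU handles the general interrupt (and control context). */
	int first = __builtin_ctzll(def_intr);
	uint64_t general = 1ULL << first;
	def_intr &= ~general;

	/* Next CPUs are dedicated to kernel receive interrupts. */
	uint64_t rcv_intr = 0;
	for (int i = 0; i < n_rcv_queues - 1 && def_intr; i++) {
		int cpu = __builtin_ctzll(def_intr);
		rcv_intr |= 1ULL << cpu;
		def_intr &= ~(1ULL << cpu);
	}

	/* Completion vectors use node CPUs not claimed by rcv/general. */
	uint64_t comp_vect = node_cpus & ~rcv_intr & ~general;

	printf("general   %#llx\n", (unsigned long long)general);
	printf("rcv_intr  %#llx\n", (unsigned long long)rcv_intr);
	printf("def_intr  %#llx\n", (unsigned long long)def_intr);
	printf("comp_vect %#llx\n", (unsigned long long)comp_vect);
	return 0;
}
```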
718 if (!dd->affinity_entry) in hfi1_dev_affinity_clean_up()
720 entry = node_affinity_lookup(dd->node); in hfi1_dev_affinity_clean_up()
730 dd->affinity_entry = NULL; in hfi1_dev_affinity_clean_up()
735 * Function updates the irq affinity hint for msix after it has been changed
741 struct sdma_engine *sde = msix->arg; in hfi1_update_sdma_affinity()
742 struct hfi1_devdata *dd = sde->dd; in hfi1_update_sdma_affinity()
747 if (cpu > num_online_cpus() || cpu == sde->cpu) in hfi1_update_sdma_affinity()
751 entry = node_affinity_lookup(dd->node); in hfi1_update_sdma_affinity()
755 old_cpu = sde->cpu; in hfi1_update_sdma_affinity()
756 sde->cpu = cpu; in hfi1_update_sdma_affinity()
757 cpumask_clear(&msix->mask); in hfi1_update_sdma_affinity()
758 cpumask_set_cpu(cpu, &msix->mask); in hfi1_update_sdma_affinity()
759 dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n", in hfi1_update_sdma_affinity()
760 msix->irq, irq_type_names[msix->type], in hfi1_update_sdma_affinity()
761 sde->this_idx, cpu); in hfi1_update_sdma_affinity()
762 irq_set_affinity_hint(msix->irq, &msix->mask); in hfi1_update_sdma_affinity()
768 set = &entry->def_intr; in hfi1_update_sdma_affinity()
769 cpumask_set_cpu(cpu, &set->mask); in hfi1_update_sdma_affinity()
770 cpumask_set_cpu(cpu, &set->used); in hfi1_update_sdma_affinity()
771 for (i = 0; i < dd->msix_info.max_requested; i++) { in hfi1_update_sdma_affinity()
774 other_msix = &dd->msix_info.msix_entries[i]; in hfi1_update_sdma_affinity()
775 if (other_msix->type != IRQ_SDMA || other_msix == msix) in hfi1_update_sdma_affinity()
778 if (cpumask_test_cpu(old_cpu, &other_msix->mask)) in hfi1_update_sdma_affinity()
781 cpumask_clear_cpu(old_cpu, &set->mask); in hfi1_update_sdma_affinity()
782 cpumask_clear_cpu(old_cpu, &set->used); in hfi1_update_sdma_affinity()
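
When user space re-targets an SDMA interrupt, hfi1_update_sdma_affinity() (matched above) adopts the new CPU into the SDMA set and releases the old CPU only if no other SDMA vector is still pinned there. The bookkeeping is modeled below with bitmasks and invented names; the actual IRQ hint and notifier plumbing is left out.

```c
/* Sketch of the mask bookkeeping when an SDMA IRQ moves to a new CPU. */
#include <stdint.h>
#include <stdio.h>

#define NVEC 4

struct sdma_vec { int cpu; };

static void update_sdma_affinity(struct sdma_vec *vec, int nvec, int idx,
				 int new_cpu, uint64_t *mask, uint64_t *used)
{
	int old_cpu = vec[idx].cpu;

	vec[idx].cpu = new_cpu;
	*mask |= 1ULL << new_cpu;	/* new CPU joins the SDMA set    */
	*used |= 1ULL << new_cpu;

	for (int i = 0; i < nvec; i++)	/* anyone else still on old_cpu? */
		if (i != idx && vec[i].cpu == old_cpu)
			return;

	*mask &= ~(1ULL << old_cpu);	/* nobody left: release old CPU  */
	*used &= ~(1ULL << old_cpu);
}

int main(void)
{
	struct sdma_vec vec[NVEC] = { {0}, {1}, {2}, {3} };
	uint64_t mask = 0x0F, used = 0x0F;

	update_sdma_affinity(vec, NVEC, 2, 6, &mask, &used);
	printf("mask %#llx used %#llx\n",
	       (unsigned long long)mask, (unsigned long long)used);
	return 0;
}
```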
802 * This is required by affinity notifier. We don't have anything to in hfi1_irq_notifier_release()
809 struct irq_affinity_notify *notify = &msix->notify; in hfi1_setup_sdma_notifier()
811 notify->irq = msix->irq; in hfi1_setup_sdma_notifier()
812 notify->notify = hfi1_irq_notifier_notify; in hfi1_setup_sdma_notifier()
813 notify->release = hfi1_irq_notifier_release; in hfi1_setup_sdma_notifier()
815 if (irq_set_affinity_notifier(notify->irq, notify)) in hfi1_setup_sdma_notifier()
816 pr_err("Failed to register sdma irq affinity notifier for irq %d\n", in hfi1_setup_sdma_notifier()
817 notify->irq); in hfi1_setup_sdma_notifier()
822 struct irq_affinity_notify *notify = &msix->notify; in hfi1_cleanup_sdma_notifier()
824 if (irq_set_affinity_notifier(notify->irq, NULL)) in hfi1_cleanup_sdma_notifier()
825 pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n", in hfi1_cleanup_sdma_notifier()
826 notify->irq); in hfi1_cleanup_sdma_notifier()
830 * Function sets the irq affinity for msix.
842 int cpu = -1; in get_irq_affinity()
845 cpumask_clear(&msix->mask); in get_irq_affinity()
847 entry = node_affinity_lookup(dd->node); in get_irq_affinity()
849 switch (msix->type) { in get_irq_affinity()
851 sde = (struct sdma_engine *)msix->arg; in get_irq_affinity()
852 scnprintf(extra, 64, "engine %u", sde->this_idx); in get_irq_affinity()
853 set = &entry->def_intr; in get_irq_affinity()
856 cpu = cpumask_first(&entry->general_intr_mask); in get_irq_affinity()
859 rcd = (struct hfi1_ctxtdata *)msix->arg; in get_irq_affinity()
860 if (rcd->ctxt == HFI1_CTRL_CTXT) in get_irq_affinity()
861 cpu = cpumask_first(&entry->general_intr_mask); in get_irq_affinity()
863 set = &entry->rcv_intr; in get_irq_affinity()
864 scnprintf(extra, 64, "ctxt %u", rcd->ctxt); in get_irq_affinity()
867 rcd = (struct hfi1_ctxtdata *)msix->arg; in get_irq_affinity()
868 set = &entry->def_intr; in get_irq_affinity()
869 scnprintf(extra, 64, "ctxt %u", rcd->ctxt); in get_irq_affinity()
872 dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type); in get_irq_affinity()
873 return -EINVAL; in get_irq_affinity()
881 if (cpu == -1 && set) { in get_irq_affinity()
883 return -ENOMEM; in get_irq_affinity()
895 cpumask_set_cpu(cpu, &msix->mask); in get_irq_affinity()
896 dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n", in get_irq_affinity()
897 msix->irq, irq_type_names[msix->type], in get_irq_affinity()
899 irq_set_affinity_hint(msix->irq, &msix->mask); in get_irq_affinity()
901 if (msix->type == IRQ_SDMA) { in get_irq_affinity()
902 sde->cpu = cpu; in get_irq_affinity()
926 entry = node_affinity_lookup(dd->node); in hfi1_put_irq_affinity()
928 switch (msix->type) { in hfi1_put_irq_affinity()
930 set = &entry->def_intr; in hfi1_put_irq_affinity()
937 struct hfi1_ctxtdata *rcd = msix->arg; in hfi1_put_irq_affinity()
940 if (rcd->ctxt != HFI1_CTRL_CTXT) in hfi1_put_irq_affinity()
941 set = &entry->rcv_intr; in hfi1_put_irq_affinity()
945 set = &entry->def_intr; in hfi1_put_irq_affinity()
953 cpumask_andnot(&set->used, &set->used, &msix->mask); in hfi1_put_irq_affinity()
957 irq_set_affinity_hint(msix->irq, NULL); in hfi1_put_irq_affinity()
958 cpumask_clear(&msix->mask); in hfi1_put_irq_affinity()
964 struct hfi1_affinity_node_list *affinity) in find_hw_thread_mask() argument
968 affinity->num_core_siblings / in find_hw_thread_mask()
971 cpumask_copy(hw_thread_mask, &affinity->proc.mask); in find_hw_thread_mask()
972 if (affinity->num_core_siblings > 0) { in find_hw_thread_mask()
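
find_hw_thread_mask(), matched above, narrows the process mask down to one hardware-thread slice per core. Assuming the common x86 enumeration where CPUs 0..N-1 are the first hardware thread of every core and N..2N-1 the second, slice t is one contiguous block of CPU ids; the sketch below computes only that simplified case and is not the driver's algorithm.

```c
/* Sketch of selecting the t-th hardware-thread slice of a node's CPUs. */
#include <stdint.h>
#include <stdio.h>

static uint64_t hw_thread_slice(unsigned t, unsigned ncores, unsigned nsiblings)
{
	if (t >= nsiblings)
		return 0;
	uint64_t block = (ncores >= 64) ? ~0ULL : ((1ULL << ncores) - 1);
	return block << (t * ncores);	/* shift selects the t-th sibling set */
}

int main(void)
{
	/* 4 physical cores, 2 hardware threads each (CPUs 0-7). */
	for (unsigned t = 0; t < 2; t++)
		printf("hw thread %u -> cpus %#llx\n", t,
		       (unsigned long long)hw_thread_slice(t, 4, 2));
	return 0;
}
```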
996 int cpu = -1, ret, i; in hfi1_get_proc_affinity()
1000 *proc_mask = current->cpus_ptr; in hfi1_get_proc_affinity()
1001 struct hfi1_affinity_node_list *affinity = &node_affinity; in hfi1_get_proc_affinity() local
1002 struct cpu_mask_set *set = &affinity->proc; in hfi1_get_proc_affinity()
1005 * check whether process/context affinity has already in hfi1_get_proc_affinity()
1008 if (current->nr_cpus_allowed == 1) { in hfi1_get_proc_affinity()
1009 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", in hfi1_get_proc_affinity()
1010 current->pid, current->comm, in hfi1_get_proc_affinity()
1013 * Mark the pre-set CPU as used. This is atomic so we don't in hfi1_get_proc_affinity()
1017 cpumask_set_cpu(cpu, &set->used); in hfi1_get_proc_affinity()
1019 } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) { in hfi1_get_proc_affinity()
1020 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", in hfi1_get_proc_affinity()
1021 current->pid, current->comm, in hfi1_get_proc_affinity()
1027 * The process does not have a preset CPU affinity so find one to in hfi1_get_proc_affinity()
1060 mutex_lock(&affinity->lock); in hfi1_get_proc_affinity()
1073 cpumask_copy(intrs_mask, (entry->def_intr.gen ? in hfi1_get_proc_affinity()
1074 &entry->def_intr.mask : in hfi1_get_proc_affinity()
1075 &entry->def_intr.used)); in hfi1_get_proc_affinity()
1076 cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ? in hfi1_get_proc_affinity()
1077 &entry->rcv_intr.mask : in hfi1_get_proc_affinity()
1078 &entry->rcv_intr.used)); in hfi1_get_proc_affinity()
1079 cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask); in hfi1_get_proc_affinity()
1084 cpumask_copy(hw_thread_mask, &set->mask); in hfi1_get_proc_affinity()
1090 if (affinity->num_core_siblings > 0) { in hfi1_get_proc_affinity()
1091 for (i = 0; i < affinity->num_core_siblings; i++) { in hfi1_get_proc_affinity()
1092 find_hw_thread_mask(i, hw_thread_mask, affinity); in hfi1_get_proc_affinity()
1100 * (set->mask == set->used) before this loop. in hfi1_get_proc_affinity()
1102 cpumask_andnot(diff, hw_thread_mask, &set->used); in hfi1_get_proc_affinity()
1116 cpumask_andnot(available_mask, available_mask, &set->used); in hfi1_get_proc_affinity()
1126 * non-interrupt handlers available, so diff gets copied in hfi1_get_proc_affinity()
1142 cpumask_andnot(available_mask, hw_thread_mask, &set->used); in hfi1_get_proc_affinity()
1162 cpu = -1; in hfi1_get_proc_affinity()
1164 cpumask_set_cpu(cpu, &set->used); in hfi1_get_proc_affinity()
1166 mutex_unlock(&affinity->lock); in hfi1_get_proc_affinity()
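
hfi1_get_proc_affinity(), matched above, first honors any affinity the process already has: if it is pinned to a single CPU (nr_cpus_allowed == 1) that CPU is simply marked used, and if the user restricted it to a subset of the process set the driver leaves the choice alone; only otherwise does it pick a CPU while avoiding interrupt CPUs. The userspace program below illustrates just those first checks using sched_getaffinity(); the decision thresholds mirror the driver only loosely.

```c
/* Illustration of the "does the process already have an affinity?"
 * checks at the top of hfi1_get_proc_affinity(). */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;

	if (sched_getaffinity(0, sizeof(set), &set)) {
		perror("sched_getaffinity");
		return 1;
	}

	int allowed = CPU_COUNT(&set);
	if (allowed == 1) {
		/* Pre-pinned: report the single allowed CPU. */
		for (int cpu = 0; cpu < CPU_SETSIZE; cpu++)
			if (CPU_ISSET(cpu, &set))
				printf("pinned, using cpu %d\n", cpu);
	} else if (allowed < (int)sysconf(_SC_NPROCESSORS_ONLN)) {
		printf("user-restricted to %d cpus, leaving affinity alone\n",
		       allowed);
	} else {
		printf("no preset affinity, driver would pick a cpu\n");
	}
	return 0;
}
```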
1182 struct hfi1_affinity_node_list *affinity = &node_affinity; in hfi1_put_proc_affinity() local
1183 struct cpu_mask_set *set = &affinity->proc; in hfi1_put_proc_affinity()
1188 mutex_lock(&affinity->lock); in hfi1_put_proc_affinity()
1191 mutex_unlock(&affinity->lock); in hfi1_put_proc_affinity()