Lines Matching refs:idxd

85 static int idxd_setup_interrupts(struct idxd_device *idxd) in idxd_setup_interrupts() argument
87 struct pci_dev *pdev = idxd->pdev; in idxd_setup_interrupts()
98 idxd->irq_cnt = msixcnt; in idxd_setup_interrupts()
108 ie = idxd_get_ie(idxd, 0); in idxd_setup_interrupts()
117 for (i = 0; i < idxd->max_wqs; i++) { in idxd_setup_interrupts()
120 ie = idxd_get_ie(idxd, msix_idx); in idxd_setup_interrupts()
130 idxd_unmask_error_interrupts(idxd); in idxd_setup_interrupts()
134 idxd_mask_error_interrupts(idxd); in idxd_setup_interrupts()
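
A condensed reconstruction may help read the fragments above: vector 0 carries the device-level (misc/error) interrupt, vectors 1..max_wqs are reserved for work queue completions, and error interrupts are unmasked only on full success (line 130) and masked on the failure path (line 134). idxd_cleanup_interrupts(), whose fragments follow, simply mirrors the failure path: mask, free the misc vector, release the MSI-X block. Label names, dev_err strings, and the per-entry bookkeeping (int_handle, pending lists) are elided or assumed here, so treat this as a sketch, not verbatim source:

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct idxd_irq_entry *ie;
        int i, msixcnt, rc;

        /* How many MSI-X vectors does the function advertise? */
        msixcnt = pci_msix_vec_count(pdev);
        if (msixcnt < 0)
                return -ENOSPC;
        idxd->irq_cnt = msixcnt;

        rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
        if (rc != msixcnt)
                return -ENOSPC;

        /* Vector 0: device-level events (errors, command completion). */
        ie = idxd_get_ie(idxd, 0);
        ie->vector = pci_irq_vector(pdev, 0);
        rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread,
                                  0, "idxd-misc", ie);
        if (rc < 0)
                goto err_misc_irq;

        /* Vectors 1..max_wqs: one interrupt entry per work queue. */
        for (i = 0; i < idxd->max_wqs; i++) {
                int msix_idx = i + 1;

                ie = idxd_get_ie(idxd, msix_idx);
                ie->id = msix_idx;
                /* per-entry handle/pasid/list initialization elided */
        }

        idxd_unmask_error_interrupts(idxd);
        return 0;

err_misc_irq:
        idxd_mask_error_interrupts(idxd);
        pci_free_irq_vectors(pdev);
        return rc;
}
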
140 static void idxd_cleanup_interrupts(struct idxd_device *idxd) in idxd_cleanup_interrupts() argument
142 struct pci_dev *pdev = idxd->pdev; in idxd_cleanup_interrupts()
150 ie = idxd_get_ie(idxd, 0); in idxd_cleanup_interrupts()
151 idxd_mask_error_interrupts(idxd); in idxd_cleanup_interrupts()
156 static int idxd_setup_wqs(struct idxd_device *idxd) in idxd_setup_wqs() argument
158 struct device *dev = &idxd->pdev->dev; in idxd_setup_wqs()
163 idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *), in idxd_setup_wqs()
165 if (!idxd->wqs) in idxd_setup_wqs()
168 idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev)); in idxd_setup_wqs()
169 if (!idxd->wq_enable_map) { in idxd_setup_wqs()
170 kfree(idxd->wqs); in idxd_setup_wqs()
174 for (i = 0; i < idxd->max_wqs; i++) { in idxd_setup_wqs()
184 wq->idxd = idxd; in idxd_setup_wqs()
186 conf_dev->parent = idxd_confdev(idxd); in idxd_setup_wqs()
189 rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id); in idxd_setup_wqs()
200 idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); in idxd_setup_wqs()
202 wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); in idxd_setup_wqs()
209 if (idxd->hw.wq_cap.op_config) { in idxd_setup_wqs()
216 bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); in idxd_setup_wqs()
220 idxd->wqs[i] = wq; in idxd_setup_wqs()
227 wq = idxd->wqs[i]; in idxd_setup_wqs()
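
idxd_setup_wqs() establishes the driver's standard allocate-then-unwind pattern, which idxd_setup_engines() and idxd_setup_groups() below repeat: NUMA-local allocation of the pointer array (plus, here, a wq enable bitmap), per-object conf_dev initialization whose release callback owns the memory, and on any failure a put_device() sweep over everything already initialized. A condensed sketch; the max_batch_size defaulting (line 200) and the op_config bitmap copy (line 216) are elided, and freeing of the top-level array and bitmap on error is left to the caller or release path:

static int idxd_setup_wqs(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        struct device *conf_dev;
        struct idxd_wq *wq;
        int i, rc;

        idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
                                 GFP_KERNEL, dev_to_node(dev));
        if (!idxd->wqs)
                return -ENOMEM;

        idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
                                                 dev_to_node(dev));
        if (!idxd->wq_enable_map) {
                kfree(idxd->wqs);
                return -ENOMEM;
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
                if (!wq) {
                        rc = -ENOMEM;
                        goto err;
                }

                wq->id = i;
                wq->idxd = idxd;
                conf_dev = wq_confdev(wq);
                device_initialize(conf_dev);
                conf_dev->parent = idxd_confdev(idxd);
                rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
                if (rc < 0) {
                        put_device(conf_dev);
                        goto err;
                }

                /* Shadow copy of this wq's WQCFG register block. */
                wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL,
                                         dev_to_node(dev));
                if (!wq->wqcfg) {
                        put_device(conf_dev);
                        rc = -ENOMEM;
                        goto err;
                }

                idxd->wqs[i] = wq;
        }
        return 0;

err:
        /* Drop every wq already initialized; release callbacks free them. */
        while (--i >= 0)
                put_device(wq_confdev(idxd->wqs[i]));
        return rc;
}
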
234 static int idxd_setup_engines(struct idxd_device *idxd) in idxd_setup_engines() argument
237 struct device *dev = &idxd->pdev->dev; in idxd_setup_engines()
241 idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *), in idxd_setup_engines()
243 if (!idxd->engines) in idxd_setup_engines()
246 for (i = 0; i < idxd->max_engines; i++) { in idxd_setup_engines()
256 engine->idxd = idxd; in idxd_setup_engines()
258 conf_dev->parent = idxd_confdev(idxd); in idxd_setup_engines()
261 rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id); in idxd_setup_engines()
267 idxd->engines[i] = engine; in idxd_setup_engines()
274 engine = idxd->engines[i]; in idxd_setup_engines()
281 static int idxd_setup_groups(struct idxd_device *idxd) in idxd_setup_groups() argument
283 struct device *dev = &idxd->pdev->dev; in idxd_setup_groups()
288 idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *), in idxd_setup_groups()
290 if (!idxd->groups) in idxd_setup_groups()
293 for (i = 0; i < idxd->max_groups; i++) { in idxd_setup_groups()
303 group->idxd = idxd; in idxd_setup_groups()
305 conf_dev->parent = idxd_confdev(idxd); in idxd_setup_groups()
308 rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id); in idxd_setup_groups()
314 idxd->groups[i] = group; in idxd_setup_groups()
315 if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { in idxd_setup_groups()
326 group->rdbufs_allowed = idxd->max_rdbufs; in idxd_setup_groups()
333 group = idxd->groups[i]; in idxd_setup_groups()
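
Besides the shared allocation pattern, idxd_setup_groups() seeds per-group defaults. The check at line 315 keys the traffic-class defaults off the hardware version and the tc_override module parameter, and line 326 grants each group the device's full read-buffer budget. Roughly (the specific tc values are the driver's historical defaults, recalled here rather than taken from this listing):

        if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
                /* Older parts: fixed traffic classes. */
                group->tc_a = 1;
                group->tc_b = 1;
        } else {
                /* Newer parts (or tc_override set): leave configurable. */
                group->tc_a = -1;
                group->tc_b = -1;
        }
        group->rdbufs_allowed = idxd->max_rdbufs;
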
339 static void idxd_cleanup_internals(struct idxd_device *idxd) in idxd_cleanup_internals() argument
343 for (i = 0; i < idxd->max_groups; i++) in idxd_cleanup_internals()
344 put_device(group_confdev(idxd->groups[i])); in idxd_cleanup_internals()
345 for (i = 0; i < idxd->max_engines; i++) in idxd_cleanup_internals()
346 put_device(engine_confdev(idxd->engines[i])); in idxd_cleanup_internals()
347 for (i = 0; i < idxd->max_wqs; i++) in idxd_cleanup_internals()
348 put_device(wq_confdev(idxd->wqs[i])); in idxd_cleanup_internals()
349 destroy_workqueue(idxd->wq); in idxd_cleanup_internals()
352 static int idxd_init_evl(struct idxd_device *idxd) in idxd_init_evl() argument
354 struct device *dev = &idxd->pdev->dev; in idxd_init_evl()
359 if (idxd->hw.gen_cap.evl_support == 0) in idxd_init_evl()
369 idxd_name = dev_name(idxd_confdev(idxd)); in idxd_init_evl()
370 evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd); in idxd_init_evl()
376 idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size, in idxd_init_evl()
379 if (!idxd->evl_cache) { in idxd_init_evl()
384 idxd->evl = evl; in idxd_init_evl()
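
idxd_init_evl() is a no-op when the hardware lacks event log support (gen_cap.evl_support == 0, line 359). Otherwise it allocates the software event-log state and a usercopy-safe slab cache sized for one fault record plus one hardware event-log entry, since records from this cache can be copied to user space during completion-record page-fault handling. A sketch; the lock type and the initial size constant are assumptions:

static int idxd_init_evl(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        unsigned int evl_cache_size;
        struct idxd_evl *evl;
        const char *idxd_name;

        if (idxd->hw.gen_cap.evl_support == 0)
                return 0;

        evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
        if (!evl)
                return -ENOMEM;

        mutex_init(&evl->lock);
        evl->size = IDXD_EVL_SIZE_MIN;

        idxd_name = dev_name(idxd_confdev(idxd));
        evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
        /* Whole objects may be copied to user space, hence usercopy. */
        idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
                                                     0, 0, 0, evl_cache_size,
                                                     NULL);
        if (!idxd->evl_cache) {
                kfree(evl);
                return -ENOMEM;
        }

        idxd->evl = evl;
        return 0;
}
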
388 static int idxd_setup_internals(struct idxd_device *idxd) in idxd_setup_internals() argument
390 struct device *dev = &idxd->pdev->dev; in idxd_setup_internals()
393 init_waitqueue_head(&idxd->cmd_waitq); in idxd_setup_internals()
395 rc = idxd_setup_wqs(idxd); in idxd_setup_internals()
399 rc = idxd_setup_engines(idxd); in idxd_setup_internals()
403 rc = idxd_setup_groups(idxd); in idxd_setup_internals()
407 idxd->wq = create_workqueue(dev_name(dev)); in idxd_setup_internals()
408 if (!idxd->wq) { in idxd_setup_internals()
413 rc = idxd_init_evl(idxd); in idxd_setup_internals()
420 destroy_workqueue(idxd->wq); in idxd_setup_internals()
422 for (i = 0; i < idxd->max_groups; i++) in idxd_setup_internals()
423 put_device(group_confdev(idxd->groups[i])); in idxd_setup_internals()
425 for (i = 0; i < idxd->max_engines; i++) in idxd_setup_internals()
426 put_device(engine_confdev(idxd->engines[i])); in idxd_setup_internals()
428 for (i = 0; i < idxd->max_wqs; i++) in idxd_setup_internals()
429 put_device(wq_confdev(idxd->wqs[i])); in idxd_setup_internals()
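
idxd_setup_internals() is the classic goto-ladder: each setup step gains a matching unwind label, and the error path (lines 420-429) releases resources in exact reverse order of acquisition. The label names below are assumptions:

        rc = idxd_setup_wqs(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_setup_engines(idxd);
        if (rc < 0)
                goto err_engine;

        rc = idxd_setup_groups(idxd);
        if (rc < 0)
                goto err_group;

        idxd->wq = create_workqueue(dev_name(dev));
        if (!idxd->wq) {
                rc = -ENOMEM;
                goto err_wkq_create;
        }

        rc = idxd_init_evl(idxd);
        if (rc < 0)
                goto err_evl;

        return 0;

err_evl:
        destroy_workqueue(idxd->wq);
err_wkq_create:
        for (i = 0; i < idxd->max_groups; i++)
                put_device(group_confdev(idxd->groups[i]));
err_group:
        for (i = 0; i < idxd->max_engines; i++)
                put_device(engine_confdev(idxd->engines[i]));
err_engine:
        for (i = 0; i < idxd->max_wqs; i++)
                put_device(wq_confdev(idxd->wqs[i]));
        return rc;
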
434 static void idxd_read_table_offsets(struct idxd_device *idxd) in idxd_read_table_offsets() argument
437 struct device *dev = &idxd->pdev->dev; in idxd_read_table_offsets()
439 offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET); in idxd_read_table_offsets()
440 offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64)); in idxd_read_table_offsets()
441 idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT; in idxd_read_table_offsets()
442 dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset); in idxd_read_table_offsets()
443 idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT; in idxd_read_table_offsets()
444 dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset); in idxd_read_table_offsets()
445 idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT; in idxd_read_table_offsets()
446 dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset); in idxd_read_table_offsets()
447 idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT; in idxd_read_table_offsets()
448 dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); in idxd_read_table_offsets()
464 static void idxd_read_caps(struct idxd_device *idxd) in idxd_read_caps() argument
466 struct device *dev = &idxd->pdev->dev; in idxd_read_caps()
470 idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET); in idxd_read_caps()
471 dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits); in idxd_read_caps()
473 if (idxd->hw.gen_cap.cmd_cap) { in idxd_read_caps()
474 idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET); in idxd_read_caps()
475 dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap); in idxd_read_caps()
479 if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) in idxd_read_caps()
480 idxd->request_int_handles = true; in idxd_read_caps()
482 idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; in idxd_read_caps()
483 dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); in idxd_read_caps()
484 idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift); in idxd_read_caps()
485 dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size); in idxd_read_caps()
486 if (idxd->hw.gen_cap.config_en) in idxd_read_caps()
487 set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags); in idxd_read_caps()
490 idxd->hw.group_cap.bits = in idxd_read_caps()
491 ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET); in idxd_read_caps()
492 dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); in idxd_read_caps()
493 idxd->max_groups = idxd->hw.group_cap.num_groups; in idxd_read_caps()
494 dev_dbg(dev, "max groups: %u\n", idxd->max_groups); in idxd_read_caps()
495 idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs; in idxd_read_caps()
496 dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs); in idxd_read_caps()
497 idxd->nr_rdbufs = idxd->max_rdbufs; in idxd_read_caps()
500 idxd->hw.engine_cap.bits = in idxd_read_caps()
501 ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET); in idxd_read_caps()
502 dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits); in idxd_read_caps()
503 idxd->max_engines = idxd->hw.engine_cap.num_engines; in idxd_read_caps()
504 dev_dbg(dev, "max engines: %u\n", idxd->max_engines); in idxd_read_caps()
507 idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET); in idxd_read_caps()
508 dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits); in idxd_read_caps()
509 idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size; in idxd_read_caps()
510 dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size); in idxd_read_caps()
511 idxd->max_wqs = idxd->hw.wq_cap.num_wqs; in idxd_read_caps()
512 dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs); in idxd_read_caps()
513 idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN); in idxd_read_caps()
514 dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size); in idxd_read_caps()
518 idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base + in idxd_read_caps()
520 dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]); in idxd_read_caps()
522 multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4); in idxd_read_caps()
525 if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2) in idxd_read_caps()
526 idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET); in idxd_read_caps()
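
Most of idxd_read_caps() is straightforward field decoding. For instance, a part reporting max_xfer_shift == 31 and max_batch_shift == 10 (plausible example values, not from this listing) yields a 2 GiB transfer cap and 1024-descriptor batches at lines 482-484, and wqcfg_size at line 513 is 2^(wqcfg_size + IDXD_WQCFG_MIN) bytes. The 256 operation-capability bits arrive as four u64 reads and are folded into a single bitmap for later per-wq masking:

        /* Operation capability: four 64-bit words -> one 256-bit bitmap. */
        for (i = 0; i < 4; i++) {
                idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
                                IDXD_OPCAP_OFFSET + i * sizeof(u64));
        }
        multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
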
533 struct idxd_device *idxd; in idxd_alloc() local
536 idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev)); in idxd_alloc()
537 if (!idxd) in idxd_alloc()
540 conf_dev = idxd_confdev(idxd); in idxd_alloc()
541 idxd->pdev = pdev; in idxd_alloc()
542 idxd->data = data; in idxd_alloc()
543 idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); in idxd_alloc()
544 idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); in idxd_alloc()
545 if (idxd->id < 0) in idxd_alloc()
548 idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev)); in idxd_alloc()
549 if (!idxd->opcap_bmap) { in idxd_alloc()
550 ida_free(&idxd_ida, idxd->id); in idxd_alloc()
557 conf_dev->type = idxd->data->dev_type; in idxd_alloc()
558 rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); in idxd_alloc()
564 spin_lock_init(&idxd->dev_lock); in idxd_alloc()
565 spin_lock_init(&idxd->cmd_lock); in idxd_alloc()
567 return idxd; in idxd_alloc()
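
idxd_alloc() builds the top-level device object before any MMIO access: NUMA-local kzalloc, an IDA-assigned instance id (which also names the device, e.g. "dsa0"), the per-device opcap bitmap, then conf_dev initialization and naming. Each failure point releases exactly what preceded it; once device_initialize() has run, the conf_dev release callback owns the memory. Condensed, with label names assumed:

        idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
        if (!idxd)
                return NULL;

        conf_dev = idxd_confdev(idxd);
        idxd->pdev = pdev;
        idxd->data = data;
        idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
        idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
        if (idxd->id < 0)
                goto err_free;

        idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL,
                                              dev_to_node(dev));
        if (!idxd->opcap_bmap) {
                ida_free(&idxd_ida, idxd->id);
                goto err_free;
        }

        device_initialize(conf_dev);
        conf_dev->parent = dev;
        conf_dev->bus = &dsa_bus_type;
        conf_dev->type = idxd->data->dev_type;
        rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
        if (rc < 0)
                goto err_name;

        spin_lock_init(&idxd->dev_lock);
        spin_lock_init(&idxd->cmd_lock);
        return idxd;

err_name:
        put_device(conf_dev);   /* release cb frees bitmap, id, and idxd */
        return NULL;
err_free:
        kfree(idxd);            /* only reached before device_initialize() */
        return NULL;
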
570 static int idxd_enable_system_pasid(struct idxd_device *idxd) in idxd_enable_system_pasid() argument
572 struct pci_dev *pdev = idxd->pdev; in idxd_enable_system_pasid()
603 idxd_set_user_intr(idxd, 1); in idxd_enable_system_pasid()
604 idxd->pasid = pasid; in idxd_enable_system_pasid()
609 static void idxd_disable_system_pasid(struct idxd_device *idxd) in idxd_disable_system_pasid() argument
611 struct pci_dev *pdev = idxd->pdev; in idxd_disable_system_pasid()
619 iommu_detach_device_pasid(domain, dev, idxd->pasid); in idxd_disable_system_pasid()
620 iommu_free_global_pasid(idxd->pasid); in idxd_disable_system_pasid()
622 idxd_set_user_intr(idxd, 0); in idxd_disable_system_pasid()
623 idxd->sva = NULL; in idxd_disable_system_pasid()
624 idxd->pasid = IOMMU_PASID_INVALID; in idxd_disable_system_pasid()
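
System PASID setup binds a global kernel PASID to the device's default DMA domain so kernel-owned work queues can issue translated DMA; lines 619-624 undo it in reverse. Very roughly, with the caveat that the iommu_attach_device_pasid() signature has changed across kernel versions and the error handling here is an assumption:

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        struct iommu_domain *domain;
        ioasid_t pasid;
        int rc;

        domain = iommu_get_domain_for_dev(dev);
        if (!domain)
                return -EPERM;

        pasid = iommu_alloc_global_pasid(dev);
        if (pasid == IOMMU_PASID_INVALID)
                return -ENOSPC;

        rc = iommu_attach_device_pasid(domain, dev, pasid, NULL);
        if (rc) {
                iommu_free_global_pasid(pasid);
                return rc;
        }

        /* Enable user interrupts now that a kernel PASID exists. */
        idxd_set_user_intr(idxd, 1);
        idxd->pasid = pasid;
        return 0;
}
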
648 static int idxd_probe(struct idxd_device *idxd) in idxd_probe() argument
650 struct pci_dev *pdev = idxd->pdev; in idxd_probe()
655 rc = idxd_device_init_reset(idxd); in idxd_probe()
665 set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); in idxd_probe()
667 rc = idxd_enable_system_pasid(idxd); in idxd_probe()
671 set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); in idxd_probe()
677 idxd_read_caps(idxd); in idxd_probe()
678 idxd_read_table_offsets(idxd); in idxd_probe()
680 rc = idxd_setup_internals(idxd); in idxd_probe()
685 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { in idxd_probe()
687 rc = idxd_device_load_config(idxd); in idxd_probe()
692 rc = idxd_setup_interrupts(idxd); in idxd_probe()
696 idxd->major = idxd_cdev_get_major(idxd); in idxd_probe()
698 rc = perfmon_pmu_init(idxd); in idxd_probe()
702 dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id); in idxd_probe()
706 idxd_cleanup_internals(idxd); in idxd_probe()
708 if (device_pasid_enabled(idxd)) in idxd_probe()
709 idxd_disable_system_pasid(idxd); in idxd_probe()
710 if (device_user_pasid_enabled(idxd)) in idxd_probe()
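
Read as a whole, idxd_probe() pins down the initialization order: reset first, SVA/PASID before anything depends on it, capabilities and table offsets next, then software internals, an optional config load for devices whose configuration is read-only (the !IDXD_FLAG_CONFIGURABLE branch at line 685), interrupts, the char-dev major, and perfmon last; lines 706-710 unwind in reverse. A schematic sketch under a hypothetical name, with the PASID steps and most error handling elided:

static int idxd_probe_sketch(struct idxd_device *idxd)
{
        int rc;

        rc = idxd_device_init_reset(idxd);      /* 1. known-clean state  */
        if (rc < 0)
                return rc;

        /* 2. SVA / system PASID enablement would go here. */

        idxd_read_caps(idxd);                   /* 3. capability regs    */
        idxd_read_table_offsets(idxd);          /*    + config tables    */

        rc = idxd_setup_internals(idxd);        /* 4. wqs/engines/groups */
        if (rc < 0)
                return rc;

        /* 5. read-only-config parts: read config back into sw state. */
        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
                rc = idxd_device_load_config(idxd);
                if (rc < 0)
                        goto err;
        }

        rc = idxd_setup_interrupts(idxd);       /* 6. MSI-X              */
        if (rc)
                goto err;

        idxd->major = idxd_cdev_get_major(idxd); /* 7. char device       */
        perfmon_pmu_init(idxd);                  /* 8. PMU last          */
        return 0;

err:
        idxd_cleanup_internals(idxd);
        return rc;
}
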
715 static void idxd_cleanup(struct idxd_device *idxd) in idxd_cleanup() argument
717 perfmon_pmu_remove(idxd); in idxd_cleanup()
718 idxd_cleanup_interrupts(idxd); in idxd_cleanup()
719 idxd_cleanup_internals(idxd); in idxd_cleanup()
720 if (device_pasid_enabled(idxd)) in idxd_cleanup()
721 idxd_disable_system_pasid(idxd); in idxd_cleanup()
722 if (device_user_pasid_enabled(idxd)) in idxd_cleanup()
723 idxd_disable_sva(idxd->pdev); in idxd_cleanup()
729 struct idxd_device *idxd; in idxd_pci_probe() local
738 idxd = idxd_alloc(pdev, data); in idxd_pci_probe()
739 if (!idxd) { in idxd_pci_probe()
745 idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0); in idxd_pci_probe()
746 if (!idxd->reg_base) { in idxd_pci_probe()
758 pci_set_drvdata(pdev, idxd); in idxd_pci_probe()
760 idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET); in idxd_pci_probe()
761 rc = idxd_probe(idxd); in idxd_pci_probe()
768 rc = data->load_device_defaults(idxd); in idxd_pci_probe()
773 rc = idxd_register_devices(idxd); in idxd_pci_probe()
779 rc = idxd_device_init_debugfs(idxd); in idxd_pci_probe()
784 idxd->hw.version); in idxd_pci_probe()
786 idxd->user_submission_safe = data->user_submission_safe; in idxd_pci_probe()
791 idxd_cleanup(idxd); in idxd_pci_probe()
793 pci_iounmap(pdev, idxd->reg_base); in idxd_pci_probe()
795 put_device(idxd_confdev(idxd)); in idxd_pci_probe()
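
Around idxd_probe(), idxd_pci_probe() is standard PCI bring-up: enable the function, map BAR 0 (IDXD_MMIO_BAR), set a 64-bit DMA mask, enable bus mastering, stash drvdata, read the version register, then run the device probe and sysfs/debugfs registration; failures funnel through idxd_cleanup(), pci_iounmap(), and a final put_device() (lines 791-795). The boilerplate half, roughly (whether the plain or managed PCI helpers are used is an assumption):

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        /* BAR 0 carries the MMIO register space. */
        idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
        if (!idxd->reg_base) {
                rc = -ENOMEM;
                goto err_iomap;
        }

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc)
                goto err_dma;

        pci_set_master(pdev);
        pci_set_drvdata(pdev, idxd);

        idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
        rc = idxd_probe(idxd);
        if (rc)
                goto err_dma;
        return 0;

err_dma:
        pci_iounmap(pdev, idxd->reg_base);
err_iomap:
        pci_disable_device(pdev);
        return rc;
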
801 void idxd_wqs_quiesce(struct idxd_device *idxd) in idxd_wqs_quiesce() argument
806 for (i = 0; i < idxd->max_wqs; i++) { in idxd_wqs_quiesce()
807 wq = idxd->wqs[i]; in idxd_wqs_quiesce()
815 struct idxd_device *idxd = pci_get_drvdata(pdev); in idxd_shutdown() local
819 rc = idxd_device_disable(idxd); in idxd_shutdown()
823 irq_entry = &idxd->ie; in idxd_shutdown()
825 idxd_mask_error_interrupts(idxd); in idxd_shutdown()
826 flush_workqueue(idxd->wq); in idxd_shutdown()
831 struct idxd_device *idxd = pci_get_drvdata(pdev); in idxd_remove() local
834 idxd_unregister_devices(idxd); in idxd_remove()
842 get_device(idxd_confdev(idxd)); in idxd_remove()
843 device_unregister(idxd_confdev(idxd)); in idxd_remove()
845 if (device_pasid_enabled(idxd)) in idxd_remove()
846 idxd_disable_system_pasid(idxd); in idxd_remove()
847 idxd_device_remove_debugfs(idxd); in idxd_remove()
849 irq_entry = idxd_get_ie(idxd, 0); in idxd_remove()
852 pci_iounmap(pdev, idxd->reg_base); in idxd_remove()
853 if (device_user_pasid_enabled(idxd)) in idxd_remove()
856 destroy_workqueue(idxd->wq); in idxd_remove()
857 perfmon_pmu_remove(idxd); in idxd_remove()
858 put_device(idxd_confdev(idxd)); in idxd_remove()
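
idxd_remove() tears down in roughly reverse probe order, with one subtlety at lines 842-843: device_unregister() drops the sysfs reference, so the driver first takes its own reference on the conf_dev to keep struct idxd_device alive while the rest of the teardown still dereferences it; only the final put_device() at line 858 lets the release callback free the memory. Reconstructed (the free_irq()/pci_disable_device() calls in the middle follow the usual pattern but are assumptions here):

        get_device(idxd_confdev(idxd));         /* pin idxd across teardown */
        device_unregister(idxd_confdev(idxd));

        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
        idxd_device_remove_debugfs(idxd);

        irq_entry = idxd_get_ie(idxd, 0);
        free_irq(irq_entry->vector, irq_entry);
        pci_free_irq_vectors(pdev);
        pci_iounmap(pdev, idxd->reg_base);
        if (device_user_pasid_enabled(idxd))
                idxd_disable_sva(pdev);
        pci_disable_device(pdev);
        destroy_workqueue(idxd->wq);
        perfmon_pmu_remove(idxd);
        put_device(idxd_confdev(idxd));         /* last ref: release frees */
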