Lines matching refs: idxd
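
The fragments below are cross-reference hits for the identifier idxd in the IDXD (Intel DSA/IAA accelerator) driver's device-control code, apparently drivers/dma/idxd/device.c, grouped by enclosing function; the trailing "argument" and "local" tags mark whether idxd enters each function as a parameter or a local variable.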
15 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
17 static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
21 void idxd_unmask_error_interrupts(struct idxd_device *idxd) in idxd_unmask_error_interrupts() argument
25 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_unmask_error_interrupts()
28 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_unmask_error_interrupts()
31 void idxd_mask_error_interrupts(struct idxd_device *idxd) in idxd_mask_error_interrupts() argument
35 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_mask_error_interrupts()
38 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_mask_error_interrupts()
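
The mask and unmask helpers above are the same read-modify-write on the GENCTRL register, flipping the error-interrupt enables in opposite directions. A minimal sketch of the shared pattern, assuming the softerr_int_en/halt_int_en bitfield names from the driver's union genctrl_reg; the helper name is ours:

static void idxd_error_interrupts_set(struct idxd_device *idxd, bool enable)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = enable;	/* assumed field name */
	genctrl.halt_int_en = enable;		/* assumed field name */
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}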
53 struct device *dev = &wq->idxd->pdev->dev; in alloc_hw_descs()
86 struct device *dev = &wq->idxd->pdev->dev; in alloc_descs()
110 struct idxd_device *idxd = wq->idxd; in idxd_wq_alloc_resources() local
111 struct device *dev = &idxd->pdev->dev; in idxd_wq_alloc_resources()
124 wq->compls_size = num_descs * idxd->data->compl_size; in idxd_wq_alloc_resources()
144 if (idxd->data->type == IDXD_TYPE_DSA) in idxd_wq_alloc_resources()
146 else if (idxd->data->type == IDXD_TYPE_IAX) in idxd_wq_alloc_resources()
148 desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i; in idxd_wq_alloc_resources()
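
idxd_wq_alloc_resources() sizes one coherent buffer at num_descs * compl_size and hands each descriptor a slot in it, with the record type keyed on DSA vs IAX. A sketch of the carving loop around the fragments, assuming dma_alloc_coherent() and the completion/iax_completion descriptor fields:

	wq->compls_size = num_descs * idxd->data->compl_size;
	wq->compls = dma_alloc_coherent(dev, wq->compls_size,
					&wq->compls_addr, GFP_KERNEL);	/* assumed allocator */
	if (!wq->compls)
		return -ENOMEM;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		if (idxd->data->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];		/* assumed field */
		else if (idxd->data->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];	/* assumed field */
		desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
	}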
168 struct device *dev = &wq->idxd->pdev->dev; in idxd_wq_free_resources()
182 struct idxd_device *idxd = wq->idxd; in idxd_wq_enable() local
183 struct device *dev = &idxd->pdev->dev; in idxd_wq_enable()
191 idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status); in idxd_wq_enable()
200 set_bit(wq->id, idxd->wq_enable_map); in idxd_wq_enable()
207 struct idxd_device *idxd = wq->idxd; in idxd_wq_disable() local
208 struct device *dev = &idxd->pdev->dev; in idxd_wq_disable()
219 idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status); in idxd_wq_disable()
228 clear_bit(wq->id, idxd->wq_enable_map); in idxd_wq_disable()
236 struct idxd_device *idxd = wq->idxd; in idxd_wq_drain() local
237 struct device *dev = &idxd->pdev->dev; in idxd_wq_drain()
247 idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL); in idxd_wq_drain()
252 struct idxd_device *idxd = wq->idxd; in idxd_wq_reset() local
253 struct device *dev = &idxd->pdev->dev; in idxd_wq_reset()
262 idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL); in idxd_wq_reset()
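
All four WQ lifecycle helpers funnel into idxd_cmd_exec(): enable passes wq->id as the operand and checks the returned status, disable builds its operand separately (the encoding is not shown in these fragments) and also checks status, while drain and reset pass a NULL status pointer. A sketch of the enable-side status handling, assuming the IDXD_CMDSTS_SUCCESS and IDXD_CMDSTS_ERR_WQ_ENABLED codes from registers.h:

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {	/* assumed benign repeat-enable */
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}
	wq->state = IDXD_WQ_ENABLED;	/* assumed state field */
	set_bit(wq->id, idxd->wq_enable_map);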
268 struct idxd_device *idxd = wq->idxd; in idxd_wq_map_portal() local
269 struct pci_dev *pdev = idxd->pdev; in idxd_wq_map_portal()
285 struct device *dev = &wq->idxd->pdev->dev; in idxd_wq_unmap_portal()
292 void idxd_wqs_unmap_portal(struct idxd_device *idxd) in idxd_wqs_unmap_portal() argument
296 for (i = 0; i < idxd->max_wqs; i++) { in idxd_wqs_unmap_portal()
297 struct idxd_wq *wq = idxd->wqs[i]; in idxd_wqs_unmap_portal()
306 struct idxd_device *idxd = wq->idxd; in __idxd_wq_set_pasid_locked() local
310 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX); in __idxd_wq_set_pasid_locked()
311 spin_lock(&idxd->dev_lock); in __idxd_wq_set_pasid_locked()
312 wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset); in __idxd_wq_set_pasid_locked()
316 iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset); in __idxd_wq_set_pasid_locked()
317 spin_unlock(&idxd->dev_lock); in __idxd_wq_set_pasid_locked()
339 struct idxd_device *idxd = wq->idxd; in idxd_wq_disable_pasid() local
348 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX); in idxd_wq_disable_pasid()
349 spin_lock(&idxd->dev_lock); in idxd_wq_disable_pasid()
350 wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset); in idxd_wq_disable_pasid()
353 iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset); in idxd_wq_disable_pasid()
354 spin_unlock(&idxd->dev_lock); in idxd_wq_disable_pasid()
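
Both PASID helpers touch a single 32-bit word of the per-WQ WQCFG block under dev_lock. A sketch of the enable side, assuming the pasid_en/pasid bitfield names from union wqcfg; the disable path at 339-354 clears the same fields:

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;	/* assumed field name */
	wqcfg.pasid = pasid;	/* assumed field name */
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);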
365 struct idxd_device *idxd = wq->idxd; in idxd_wq_disable_cleanup() local
369 memset(wq->wqcfg, 0, idxd->wqcfg_size); in idxd_wq_disable_cleanup()
377 idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); in idxd_wq_disable_cleanup()
379 bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); in idxd_wq_disable_cleanup()
431 static inline bool idxd_is_enabled(struct idxd_device *idxd) in idxd_is_enabled() argument
435 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); in idxd_is_enabled()
442 static inline bool idxd_device_is_halted(struct idxd_device *idxd) in idxd_device_is_halted() argument
446 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); in idxd_device_is_halted()
456 int idxd_device_init_reset(struct idxd_device *idxd) in idxd_device_init_reset() argument
458 struct device *dev = &idxd->pdev->dev; in idxd_device_init_reset()
461 if (idxd_device_is_halted(idxd)) { in idxd_device_init_reset()
462 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n"); in idxd_device_init_reset()
469 spin_lock(&idxd->cmd_lock); in idxd_device_init_reset()
470 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); in idxd_device_init_reset()
472 while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & in idxd_device_init_reset()
475 spin_unlock(&idxd->cmd_lock); in idxd_device_init_reset()
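
idxd_device_init_reset() is the one polled command path, presumably because the command-done interrupt is not yet usable this early; it busy-waits on IDXD_CMDSTS_ACTIVE under cmd_lock. A reconstruction of the body around the fragments, where the memset/cmd setup and cpu_relax() poll body are assumptions:

	union idxd_command_reg cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();	/* assumed poll body */
	spin_unlock(&idxd->cmd_lock);
	return 0;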
479 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, in idxd_cmd_exec() argument
487 if (idxd_device_is_halted(idxd)) { in idxd_cmd_exec()
488 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n"); in idxd_cmd_exec()
499 spin_lock_irqsave(&idxd->cmd_lock, flags); in idxd_cmd_exec()
500 wait_event_lock_irq(idxd->cmd_waitq, in idxd_cmd_exec()
501 !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags), in idxd_cmd_exec()
502 idxd->cmd_lock); in idxd_cmd_exec()
504 dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n", in idxd_cmd_exec()
507 idxd->cmd_status = 0; in idxd_cmd_exec()
508 __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags); in idxd_cmd_exec()
509 idxd->cmd_done = &done; in idxd_cmd_exec()
510 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); in idxd_cmd_exec()
516 spin_unlock_irqrestore(&idxd->cmd_lock, flags); in idxd_cmd_exec()
518 stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); in idxd_cmd_exec()
519 spin_lock(&idxd->cmd_lock); in idxd_cmd_exec()
522 idxd->cmd_status = stat & GENMASK(7, 0); in idxd_cmd_exec()
524 __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags); in idxd_cmd_exec()
526 wake_up(&idxd->cmd_waitq); in idxd_cmd_exec()
527 spin_unlock(&idxd->cmd_lock); in idxd_cmd_exec()
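
idxd_cmd_exec() is the interrupt-driven command path the rest of the file leans on: callers serialize on IDXD_FLAG_CMD_RUNNING, post the command register, sleep on a completion that the command-done IRQ handler fires, then latch the low status byte into idxd->cmd_status. A sketch with the pieces between the fragments filled in; the completion declaration, command setup, and *status write-back are reconstructions:

	DECLARE_COMPLETION_ONSTACK(done);	/* matches the &done use above */
	union idxd_command_reg cmd;
	unsigned long flags;
	u32 stat;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;	/* assumed field names */
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock_irqsave(&idxd->cmd_lock, flags);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->cmd_lock);
	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
	spin_unlock_irqrestore(&idxd->cmd_lock, flags);

	wait_for_completion(&done);	/* command-done IRQ completes this */

	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_lock(&idxd->cmd_lock);
	if (status)
		*status = stat;
	idxd->cmd_status = stat & GENMASK(7, 0);
	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	wake_up(&idxd->cmd_waitq);
	spin_unlock(&idxd->cmd_lock);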
530 int idxd_device_enable(struct idxd_device *idxd) in idxd_device_enable() argument
532 struct device *dev = &idxd->pdev->dev; in idxd_device_enable()
535 if (idxd_is_enabled(idxd)) { in idxd_device_enable()
540 idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status); in idxd_device_enable()
549 idxd->state = IDXD_DEV_ENABLED; in idxd_device_enable()
553 int idxd_device_disable(struct idxd_device *idxd) in idxd_device_disable() argument
555 struct device *dev = &idxd->pdev->dev; in idxd_device_disable()
558 if (!idxd_is_enabled(idxd)) { in idxd_device_disable()
563 idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status); in idxd_device_disable()
572 idxd_device_clear_state(idxd); in idxd_device_disable()
576 void idxd_device_reset(struct idxd_device *idxd) in idxd_device_reset() argument
578 idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL); in idxd_device_reset()
579 idxd_device_clear_state(idxd); in idxd_device_reset()
580 spin_lock(&idxd->dev_lock); in idxd_device_reset()
581 idxd_unmask_error_interrupts(idxd); in idxd_device_reset()
582 spin_unlock(&idxd->dev_lock); in idxd_device_reset()
585 void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid) in idxd_device_drain_pasid() argument
587 struct device *dev = &idxd->pdev->dev; in idxd_device_drain_pasid()
592 idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL); in idxd_device_drain_pasid()
596 int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle, in idxd_device_request_int_handle() argument
599 struct device *dev = &idxd->pdev->dev; in idxd_device_request_int_handle()
602 if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))) in idxd_device_request_int_handle()
613 idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status); in idxd_device_request_int_handle()
626 int idxd_device_release_int_handle(struct idxd_device *idxd, int handle, in idxd_device_release_int_handle() argument
629 struct device *dev = &idxd->pdev->dev; in idxd_device_release_int_handle()
633 if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))) in idxd_device_release_int_handle()
649 spin_lock(&idxd->cmd_lock); in idxd_device_release_int_handle()
650 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); in idxd_device_release_int_handle()
652 while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE) in idxd_device_release_int_handle()
654 status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); in idxd_device_release_int_handle()
655 spin_unlock(&idxd->cmd_lock); in idxd_device_release_int_handle()
667 static void idxd_engines_clear_state(struct idxd_device *idxd) in idxd_engines_clear_state() argument
672 lockdep_assert_held(&idxd->dev_lock); in idxd_engines_clear_state()
673 for (i = 0; i < idxd->max_engines; i++) { in idxd_engines_clear_state()
674 engine = idxd->engines[i]; in idxd_engines_clear_state()
679 static void idxd_groups_clear_state(struct idxd_device *idxd) in idxd_groups_clear_state() argument
684 lockdep_assert_held(&idxd->dev_lock); in idxd_groups_clear_state()
685 for (i = 0; i < idxd->max_groups; i++) { in idxd_groups_clear_state()
686 group = idxd->groups[i]; in idxd_groups_clear_state()
695 group->rdbufs_allowed = idxd->max_rdbufs; in idxd_groups_clear_state()
697 if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { in idxd_groups_clear_state()
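
The group clear path restores software defaults rather than just zeroing: each group gets the full read-buffer allowance back, and on hardware at or below DEVICE_VERSION_2 (unless the tc_override module parameter is set) the traffic classes revert to fixed defaults. A sketch of the per-group reset, where the soft-state field names and tc_a/tc_b default values are assumptions:

	memset(&group->grpcfg, 0, sizeof(group->grpcfg));
	group->num_engines = 0;			/* assumed soft-state fields */
	group->num_wqs = 0;
	group->use_rdbuf_limit = false;
	group->rdbufs_allowed = idxd->max_rdbufs;
	group->rdbufs_reserved = 0;
	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
		group->tc_a = 1;		/* assumed defaults */
		group->tc_b = 1;
	} else {
		group->tc_a = -1;
		group->tc_b = -1;
	}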
709 static void idxd_device_wqs_clear_state(struct idxd_device *idxd) in idxd_device_wqs_clear_state() argument
713 for (i = 0; i < idxd->max_wqs; i++) { in idxd_device_wqs_clear_state()
714 struct idxd_wq *wq = idxd->wqs[i]; in idxd_device_wqs_clear_state()
723 void idxd_device_clear_state(struct idxd_device *idxd) in idxd_device_clear_state() argument
726 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { in idxd_device_clear_state()
731 idxd_device_wqs_clear_state(idxd); in idxd_device_clear_state()
733 spin_lock(&idxd->dev_lock); in idxd_device_clear_state()
734 idxd_groups_clear_state(idxd); in idxd_device_clear_state()
735 idxd_engines_clear_state(idxd); in idxd_device_clear_state()
737 spin_lock(&idxd->dev_lock); in idxd_device_clear_state()
740 idxd->state = IDXD_DEV_DISABLED; in idxd_device_clear_state()
741 spin_unlock(&idxd->dev_lock); in idxd_device_clear_state()
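
The two spin_lock fragments in idxd_device_clear_state() are the arms of an if/else: WQ state is cleared outside dev_lock, group and engine state under it, and a non-configurable device just takes the lock to flip the state. Reconstructed shape:

void idxd_device_clear_state(struct idxd_device *idxd)
{
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		idxd_device_wqs_clear_state(idxd);	/* no dev_lock needed here */
		spin_lock(&idxd->dev_lock);
		idxd_groups_clear_state(idxd);
		idxd_engines_clear_state(idxd);
	} else {
		spin_lock(&idxd->dev_lock);
	}
	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);
}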
744 static int idxd_device_evl_setup(struct idxd_device *idxd) in idxd_device_evl_setup() argument
749 struct device *dev = &idxd->pdev->dev; in idxd_device_evl_setup()
753 struct idxd_evl *evl = idxd->evl; in idxd_device_evl_setup()
760 size = evl_size(idxd); in idxd_device_evl_setup()
788 iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET); in idxd_device_evl_setup()
789 iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8); in idxd_device_evl_setup()
791 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_device_evl_setup()
793 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_device_evl_setup()
795 gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); in idxd_device_evl_setup()
797 iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); in idxd_device_evl_setup()
808 static void idxd_device_evl_free(struct idxd_device *idxd) in idxd_device_evl_free() argument
815 struct device *dev = &idxd->pdev->dev; in idxd_device_evl_free()
816 struct idxd_evl *evl = idxd->evl; in idxd_device_evl_free()
818 gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); in idxd_device_evl_free()
824 iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); in idxd_device_evl_free()
826 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_device_evl_free()
828 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); in idxd_device_evl_free()
830 iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET); in idxd_device_evl_free()
831 iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8); in idxd_device_evl_free()
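
Event-log setup programs the two 64-bit EVLCFG words, then flips an interrupt enable in GENCTRL and an enable bit in GENCFG; the free path above undoes that in reverse before zeroing EVLCFG. A sketch of the teardown ordering, where the evl_en/evl_int_en field names are assumptions:

	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	gencfg.evl_en = 0;	/* assumed field name */
	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.evl_int_en = 0;	/* assumed field name */
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);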
846 struct idxd_device *idxd = group->idxd; in idxd_group_config_write() local
847 struct device *dev = &idxd->pdev->dev; in idxd_group_config_write()
855 grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i); in idxd_group_config_write()
856 iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset); in idxd_group_config_write()
859 ioread64(idxd->reg_base + grpcfg_offset)); in idxd_group_config_write()
863 grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id); in idxd_group_config_write()
864 iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset); in idxd_group_config_write()
866 grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset)); in idxd_group_config_write()
869 grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id); in idxd_group_config_write()
870 iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset); in idxd_group_config_write()
873 ioread64(idxd->reg_base + grpcfg_offset)); in idxd_group_config_write()
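
A group's configuration lands in three register blocks written back-to-back: the WQ membership bitmap (one 64-bit word per 64 WQs), the engine bitmap, and the flags word. A sketch without the dev_dbg read-backs, assuming the GRPWQCFG_STRIDES count from registers.h:

	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
	}
	iowrite64(group->grpcfg.engines,
		  idxd->reg_base + GRPENGCFG_OFFSET(idxd, group->id));
	iowrite64(group->grpcfg.flags.bits,
		  idxd->reg_base + GRPFLGCFG_OFFSET(idxd, group->id));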
876 static int idxd_groups_config_write(struct idxd_device *idxd) in idxd_groups_config_write() argument
881 struct device *dev = &idxd->pdev->dev; in idxd_groups_config_write()
884 if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) { in idxd_groups_config_write()
885 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); in idxd_groups_config_write()
886 reg.rdbuf_limit = idxd->rdbuf_limit; in idxd_groups_config_write()
887 iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); in idxd_groups_config_write()
891 ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET)); in idxd_groups_config_write()
893 for (i = 0; i < idxd->max_groups; i++) { in idxd_groups_config_write()
894 struct idxd_group *group = idxd->groups[i]; in idxd_groups_config_write()
902 static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd) in idxd_device_pasid_priv_enabled() argument
904 struct pci_dev *pdev = idxd->pdev; in idxd_device_pasid_priv_enabled()
913 struct idxd_device *idxd = wq->idxd; in idxd_wq_config_write() local
914 struct device *dev = &idxd->pdev->dev; in idxd_wq_config_write()
925 for (i = 0; i < WQCFG_STRIDES(idxd); i++) { in idxd_wq_config_write()
926 wq_offset = WQCFG_OFFSET(idxd, wq->id, i); in idxd_wq_config_write()
927 wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset); in idxd_wq_config_write()
955 !idxd_device_pasid_priv_enabled(idxd) && in idxd_wq_config_write()
957 idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV; in idxd_wq_config_write()
963 if (idxd->hw.gen_cap.block_on_fault && in idxd_wq_config_write()
968 if (idxd->hw.wq_cap.wq_ats_support) in idxd_wq_config_write()
971 if (idxd->hw.wq_cap.wq_prs_support) in idxd_wq_config_write()
976 idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size)); in idxd_wq_config_write()
979 if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) { in idxd_wq_config_write()
990 for (i = 0; i < WQCFG_STRIDES(idxd); i++) { in idxd_wq_config_write()
991 wq_offset = WQCFG_OFFSET(idxd, wq->id, i); in idxd_wq_config_write()
992 iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset); in idxd_wq_config_write()
995 ioread32(idxd->reg_base + wq_offset)); in idxd_wq_config_write()
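
WQCFG is written as WQCFG_STRIDES(idxd) 32-bit words. The first loop above OR-merges the live register contents into the cached copy so hardware-initialized defaults survive, the fields are then edited in the cached union, and the second loop writes every stride back:

	for (i = 0; i < WQCFG_STRIDES(idxd); i++)
		wq->wqcfg->bits[i] |= ioread32(idxd->reg_base +
					       WQCFG_OFFSET(idxd, wq->id, i));

	/* ... edit cached wq->wqcfg fields: size, priority, pasid,
	 *     block-on-fault, ATS/PRS, max batch shift, op config ... */

	for (i = 0; i < WQCFG_STRIDES(idxd); i++)
		iowrite32(wq->wqcfg->bits[i],
			  idxd->reg_base + WQCFG_OFFSET(idxd, wq->id, i));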
1001 static int idxd_wqs_config_write(struct idxd_device *idxd) in idxd_wqs_config_write() argument
1005 for (i = 0; i < idxd->max_wqs; i++) { in idxd_wqs_config_write()
1006 struct idxd_wq *wq = idxd->wqs[i]; in idxd_wqs_config_write()
1016 static void idxd_group_flags_setup(struct idxd_device *idxd) in idxd_group_flags_setup() argument
1021 for (i = 0; i < idxd->max_groups; i++) { in idxd_group_flags_setup()
1022 struct idxd_group *group = idxd->groups[i]; in idxd_group_flags_setup()
1040 static int idxd_engines_setup(struct idxd_device *idxd) in idxd_engines_setup() argument
1046 for (i = 0; i < idxd->max_groups; i++) { in idxd_engines_setup()
1047 group = idxd->groups[i]; in idxd_engines_setup()
1051 for (i = 0; i < idxd->max_engines; i++) { in idxd_engines_setup()
1052 eng = idxd->engines[i]; in idxd_engines_setup()
1068 static int idxd_wqs_setup(struct idxd_device *idxd) in idxd_wqs_setup() argument
1073 struct device *dev = &idxd->pdev->dev; in idxd_wqs_setup()
1075 for (i = 0; i < idxd->max_groups; i++) { in idxd_wqs_setup()
1076 group = idxd->groups[i]; in idxd_wqs_setup()
1081 for (i = 0; i < idxd->max_wqs; i++) { in idxd_wqs_setup()
1082 wq = idxd->wqs[i]; in idxd_wqs_setup()
1089 idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT; in idxd_wqs_setup()
1099 idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED; in idxd_wqs_setup()
1106 int idxd_device_config(struct idxd_device *idxd) in idxd_device_config() argument
1110 lockdep_assert_held(&idxd->dev_lock); in idxd_device_config()
1111 rc = idxd_wqs_setup(idxd); in idxd_device_config()
1115 rc = idxd_engines_setup(idxd); in idxd_device_config()
1119 idxd_group_flags_setup(idxd); in idxd_device_config()
1121 rc = idxd_wqs_config_write(idxd); in idxd_device_config()
1125 rc = idxd_groups_config_write(idxd); in idxd_device_config()
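
idxd_device_config() fixes the ordering: soft-state setup for WQs and engines, then group flags, then the WQ and group register writes, all with dev_lock held by the caller. Reconstruction from the fragments:

int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;
	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;
	idxd_group_flags_setup(idxd);
	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;
	return idxd_groups_config_write(idxd);
}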
1134 struct idxd_device *idxd = wq->idxd; in idxd_wq_load_config() local
1135 struct device *dev = &idxd->pdev->dev; in idxd_wq_load_config()
1139 wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0); in idxd_wq_load_config()
1140 memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size); in idxd_wq_load_config()
1154 idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift); in idxd_wq_load_config()
1156 for (i = 0; i < WQCFG_STRIDES(idxd); i++) { in idxd_wq_load_config()
1157 wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i); in idxd_wq_load_config()
1166 struct idxd_device *idxd = group->idxd; in idxd_group_load_config() local
1167 struct device *dev = &idxd->pdev->dev; in idxd_group_load_config()
1177 grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i); in idxd_group_load_config()
1178 group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset); in idxd_group_load_config()
1182 if (i * 64 >= idxd->max_wqs) in idxd_group_load_config()
1190 if (id >= idxd->max_wqs) in idxd_group_load_config()
1195 wq = idxd->wqs[id]; in idxd_group_load_config()
1201 grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id); in idxd_group_load_config()
1202 group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset); in idxd_group_load_config()
1208 if (i >= idxd->max_engines) in idxd_group_load_config()
1212 struct idxd_engine *engine = idxd->engines[i]; in idxd_group_load_config()
1218 grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id); in idxd_group_load_config()
1219 group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset); in idxd_group_load_config()
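
The load path inverts the write path: after reading each GRPWQCFG word, the set bits are walked to relink WQs to their group, bounded by max_wqs. A sketch of the inner decode, where the BIT(j) walk and the wq->group assignment are assumptions:

	group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
	if (i * 64 >= idxd->max_wqs)
		break;
	for (j = 0; j < 64; j++) {
		int id = i * 64 + j;

		if (id >= idxd->max_wqs)
			break;
		if (group->grpcfg.wqs[i] & BIT(j)) {
			wq = idxd->wqs[id];
			wq->group = group;	/* assumed linkage field */
		}
	}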
1224 int idxd_device_load_config(struct idxd_device *idxd) in idxd_device_load_config() argument
1229 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); in idxd_device_load_config()
1230 idxd->rdbuf_limit = reg.rdbuf_limit; in idxd_device_load_config()
1232 for (i = 0; i < idxd->max_groups; i++) { in idxd_device_load_config()
1233 struct idxd_group *group = idxd->groups[i]; in idxd_device_load_config()
1238 for (i = 0; i < idxd->max_wqs; i++) { in idxd_device_load_config()
1239 struct idxd_wq *wq = idxd->wqs[i]; in idxd_device_load_config()
1285 static void idxd_device_set_perm_entry(struct idxd_device *idxd, in idxd_device_set_perm_entry() argument
1296 iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8); in idxd_device_set_perm_entry()
1299 static void idxd_device_clear_perm_entry(struct idxd_device *idxd, in idxd_device_clear_perm_entry() argument
1302 iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8); in idxd_device_clear_perm_entry()
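
Each interrupt entry owns an 8-byte slot in the MSI-X permission table at msix_perm_offset; set programs a PASID-tagged entry and clear writes zero. A sketch of the set side, assuming the union msix_perm field names:

	union msix_perm mperm;

	if (ie->pasid == IOMMU_PASID_INVALID)	/* assumed guard */
		return;
	mperm.bits = 0;
	mperm.pasid = ie->pasid;	/* assumed field names */
	mperm.pasid_en = 1;
	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);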
1307 struct idxd_device *idxd = wq->idxd; in idxd_wq_free_irq() local
1315 if (idxd->request_int_handles) in idxd_wq_free_irq()
1316 idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX); in idxd_wq_free_irq()
1317 idxd_device_clear_perm_entry(idxd, ie); in idxd_wq_free_irq()
1325 struct idxd_device *idxd = wq->idxd; in idxd_wq_request_irq() local
1326 struct pci_dev *pdev = idxd->pdev; in idxd_wq_request_irq()
1336 ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID; in idxd_wq_request_irq()
1337 idxd_device_set_perm_entry(idxd, ie); in idxd_wq_request_irq()
1345 if (idxd->request_int_handles) { in idxd_wq_request_irq()
1346 rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle, in idxd_wq_request_irq()
1360 idxd_device_clear_perm_entry(idxd, ie); in idxd_wq_request_irq()
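
The request side pairs each step with an unwind: program the permission entry, request the vector, then optionally request an interrupt handle from the device; a failure at any point rolls back the earlier steps. A sketch assuming request_threaded_irq() with the driver's WQ IRQ thread:

	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID;
	idxd_device_set_perm_entry(idxd, ie);

	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0,
				  "idxd-portal", ie);	/* assumed handler/name */
	if (rc < 0)
		goto err_irq;

	if (idxd->request_int_handles) {
		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
						    IDXD_IRQ_MSIX);
		if (rc < 0)
			goto err_int_handle;
	}
	return 0;

err_int_handle:
	free_irq(ie->vector, ie);
err_irq:
	idxd_device_clear_perm_entry(idxd, ie);
	return rc;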
1367 struct idxd_device *idxd = wq->idxd; in idxd_drv_enable_wq() local
1368 struct device *dev = &idxd->pdev->dev; in idxd_drv_enable_wq()
1373 if (idxd->state != IDXD_DEV_ENABLED) { in idxd_drv_enable_wq()
1374 idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED; in idxd_drv_enable_wq()
1380 idxd->cmd_status = IDXD_SCMD_WQ_ENABLED; in idxd_drv_enable_wq()
1387 idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP; in idxd_drv_enable_wq()
1392 idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME; in idxd_drv_enable_wq()
1400 idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM; in idxd_drv_enable_wq()
1413 idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH; in idxd_drv_enable_wq()
1427 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { in idxd_drv_enable_wq()
1430 u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0; in idxd_drv_enable_wq()
1438 spin_lock(&idxd->dev_lock); in idxd_drv_enable_wq()
1439 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in idxd_drv_enable_wq()
1440 rc = idxd_device_config(idxd); in idxd_drv_enable_wq()
1441 spin_unlock(&idxd->dev_lock); in idxd_drv_enable_wq()
1455 idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR; in idxd_drv_enable_wq()
1464 idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; in idxd_drv_enable_wq()
1471 idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR; in idxd_drv_enable_wq()
1478 idxd->cmd_status = IDXD_SCMD_PERCPU_ERR; in idxd_drv_enable_wq()
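
idxd_drv_enable_wq() validates first (device enabled, group, name, SVM/threshold constraints), writes the device configuration under dev_lock if the device is configurable, then walks enable, portal map, IRQ request, resource allocation, and percpu-ref init; each failure latches a distinct idxd->cmd_status before unwinding. A sketch of the tail, where the label names and the idxd_wq_init_percpu_ref() call are assumptions:

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
		goto err_map_portal;
	}
	rc = idxd_wq_request_irq(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
		goto err_irq;
	}
	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
		goto err_res_alloc;
	}
	rc = idxd_wq_init_percpu_ref(wq);	/* assumed helper */
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
		goto err_ref;
	}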
1501 struct idxd_device *idxd = wq->idxd; in idxd_drv_disable_wq() local
1502 struct device *dev = &idxd->pdev->dev; in idxd_drv_disable_wq()
1523 struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev); in idxd_device_drv_probe() local
1531 if (idxd->state != IDXD_DEV_DISABLED) { in idxd_device_drv_probe()
1532 idxd->cmd_status = IDXD_SCMD_DEV_ENABLED; in idxd_device_drv_probe()
1537 spin_lock(&idxd->dev_lock); in idxd_device_drv_probe()
1538 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in idxd_device_drv_probe()
1539 rc = idxd_device_config(idxd); in idxd_device_drv_probe()
1540 spin_unlock(&idxd->dev_lock); in idxd_device_drv_probe()
1550 if (idxd->pasid != IOMMU_PASID_INVALID) in idxd_device_drv_probe()
1551 idxd_set_user_intr(idxd, 1); in idxd_device_drv_probe()
1553 rc = idxd_device_evl_setup(idxd); in idxd_device_drv_probe()
1555 idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR; in idxd_device_drv_probe()
1560 rc = idxd_device_enable(idxd); in idxd_device_drv_probe()
1562 idxd_device_evl_free(idxd); in idxd_device_drv_probe()
1567 rc = idxd_register_dma_device(idxd); in idxd_device_drv_probe()
1569 idxd_device_disable(idxd); in idxd_device_drv_probe()
1570 idxd_device_evl_free(idxd); in idxd_device_drv_probe()
1571 idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR; in idxd_device_drv_probe()
1575 idxd->cmd_status = 0; in idxd_device_drv_probe()
1582 struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev); in idxd_device_drv_remove() local
1585 for (i = 0; i < idxd->max_wqs; i++) { in idxd_device_drv_remove()
1586 struct idxd_wq *wq = idxd->wqs[i]; in idxd_device_drv_remove()
1595 idxd_unregister_dma_device(idxd); in idxd_device_drv_remove()
1596 idxd_device_disable(idxd); in idxd_device_drv_remove()
1597 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in idxd_device_drv_remove()
1598 idxd_device_reset(idxd); in idxd_device_drv_remove()
1599 idxd_device_evl_free(idxd); in idxd_device_drv_remove()
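
Teardown order in idxd_device_drv_remove(): detach any WQ that is still live, unregister the dmaengine side, disable the device, reset only if it is configurable, and free the event log last. A sketch where the wq->state guard and the device_release_driver() detach are assumptions:

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_DISABLED)	/* assumed guard */
			continue;
		device_release_driver(wq_confdev(wq));	/* assumed detach */
	}

	idxd_unregister_dma_device(idxd);
	idxd_device_disable(idxd);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		idxd_device_reset(idxd);
	idxd_device_evl_free(idxd);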