Lines Matching full:gt

58 static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)  in pf_send_vf_cfg_reset()  argument
60 struct xe_guc *guc = &gt->uc.guc; in pf_send_vf_cfg_reset()
72 static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords) in pf_send_vf_cfg_klvs() argument
75 struct xe_tile *tile = gt_to_tile(gt); in pf_send_vf_cfg_klvs()
77 struct xe_guc *guc = &gt->uc.guc; in pf_send_vf_cfg_klvs()
103 static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs, in pf_push_vf_cfg_klvs() argument
108 xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords)); in pf_push_vf_cfg_klvs()
110 ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords); in pf_push_vf_cfg_klvs()
114 struct drm_printer p = xe_gt_info_printer(gt); in pf_push_vf_cfg_klvs()
117 xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n", in pf_push_vf_cfg_klvs()
125 struct drm_printer p = xe_gt_info_printer(gt); in pf_push_vf_cfg_klvs()
133 static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value) in pf_push_vf_cfg_u32() argument
140 return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv)); in pf_push_vf_cfg_u32()
143 static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value) in pf_push_vf_cfg_u64() argument
151 return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv)); in pf_push_vf_cfg_u64()
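Both scalar pushers above wrap a single key/value pair in the GuC KLV wire format: a 32-bit header dword carrying the 16-bit key and the value length in dwords, followed by the value itself (one dword for u32, two for u64). A minimal sketch of that encoding; the field layout mirrors the GUC_KLV_0_KEY/GUC_KLV_0_LEN masks from the GuC ABI headers, but treat the exact widths here as an assumption:

#include <linux/bitfield.h>

/* Assumed layout: key in the high 16 bits, dword count in the low 16. */
#define EXAMPLE_KLV_KEY		GENMASK(31, 16)
#define EXAMPLE_KLV_LEN		GENMASK(15, 0)

static u32 example_encode_klv_u64(u32 *dst, u16 key, u64 value)
{
	dst[0] = FIELD_PREP(EXAMPLE_KLV_KEY, key) |
		 FIELD_PREP(EXAMPLE_KLV_LEN, 2);	/* u64 = 2 dwords */
	dst[1] = lower_32_bits(value);
	dst[2] = upper_32_bits(value);
	return 3;	/* dwords written; compare the klv[] sizes above */
}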
154 static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size) in pf_push_vf_cfg_ggtt() argument
165 return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs)); in pf_push_vf_cfg_ggtt()
168 static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num) in pf_push_vf_cfg_ctxs() argument
177 return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs)); in pf_push_vf_cfg_ctxs()
180 static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num) in pf_push_vf_cfg_dbs() argument
189 return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs)); in pf_push_vf_cfg_dbs()
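The GGTT, context and doorbell pushers all follow the same two-KLV shape: one KLV for the start of the assigned range, one for its size. A plausible reconstruction of the elided doorbell body, assuming the PREP_GUC_KLV_CONST() helper and the doorbell key names from the GuC VF-config KLV set:

static int example_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid,
				   u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_CONST(GUC_KLV_VF_CFG_BEGIN_DOORBELL_ID_KEY, 1),
		begin,
		PREP_GUC_KLV_CONST(GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, 1),
		num,
	};

	/* 2 KLVs, 4 dwords total */
	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}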
192 static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum) in pf_push_vf_cfg_exec_quantum() argument
197 return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum); in pf_push_vf_cfg_exec_quantum()
200 static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout) in pf_push_vf_cfg_preempt_timeout() argument
205 return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout); in pf_push_vf_cfg_preempt_timeout()
208 static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) in pf_push_vf_cfg_lmem() argument
210 return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size); in pf_push_vf_cfg_lmem()
213 static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid, in pf_push_vf_cfg_threshold() argument
218 xe_gt_assert(gt, key); in pf_push_vf_cfg_threshold()
219 return pf_push_vf_cfg_u32(gt, vfid, key, value); in pf_push_vf_cfg_threshold()
222 static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid) in pf_pick_vf_config() argument
224 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_pick_vf_config()
225 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); in pf_pick_vf_config()
226 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_pick_vf_config()
228 return &gt->sriov.pf.vfs[vfid].config; in pf_pick_vf_config()
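pf_pick_vf_config() is the sole accessor for a VF's provisioning record, and its asserts state the contract: PF mode only, vfid within 0..totalvfs (index 0 is the PF's own entry), and the master mutex held so the record cannot change under the caller. A usage sketch of that locking discipline (the helper name is hypothetical):

static u32 example_peek_num_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	num = pf_pick_vf_config(gt, vfid)->num_ctxs;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num;
}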
291 static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid) in pf_push_full_vf_config() argument
293 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_push_full_vf_config()
305 xe_gt_assert(gt, num_dwords <= max_cfg_dwords); in pf_push_full_vf_config()
307 if (xe_gt_is_media_type(gt)) { in pf_push_full_vf_config()
308 struct xe_gt *primary = gt->tile->primary_gt; in pf_push_full_vf_config()
311 /* media-GT will never include a GGTT config */ in pf_push_full_vf_config()
312 xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config)); in pf_push_full_vf_config()
314 /* the GGTT config must be taken from the primary-GT instead */ in pf_push_full_vf_config()
317 xe_gt_assert(gt, num_dwords <= max_cfg_dwords); in pf_push_full_vf_config()
320 err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords); in pf_push_full_vf_config()
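When pushing a full config for a media GT, the GGTT KLVs are borrowed from the primary GT of the same tile, since GGTT is a per-tile resource that only the primary GT provisions. A sketch of that substitution, using encode_config_ggtt() as seen in the assert above (its return value is the number of dwords written; the num_klvs bookkeeping is an assumption):

if (xe_gt_is_media_type(gt)) {
	struct xe_gt *primary = gt->tile->primary_gt;
	struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

	/* the media config itself carries no GGTT KLVs; borrow them */
	num_dwords += encode_config_ggtt(cfg + num_dwords, other);
	num_klvs += 2;	/* GGTT start + size */
}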
326 static u64 pf_get_ggtt_alignment(struct xe_gt *gt) in pf_get_ggtt_alignment() argument
328 struct xe_device *xe = gt_to_xe(gt); in pf_get_ggtt_alignment()
333 static u64 pf_get_min_spare_ggtt(struct xe_gt *gt) in pf_get_min_spare_ggtt() argument
337 pf_get_ggtt_alignment(gt) : SZ_64M; in pf_get_min_spare_ggtt()
340 static u64 pf_get_spare_ggtt(struct xe_gt *gt) in pf_get_spare_ggtt() argument
344 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in pf_get_spare_ggtt()
345 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_get_spare_ggtt()
346 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_get_spare_ggtt()
348 spare = gt->sriov.pf.spare.ggtt_size; in pf_get_spare_ggtt()
349 spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt)); in pf_get_spare_ggtt()
354 static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size) in pf_set_spare_ggtt() argument
356 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in pf_set_spare_ggtt()
357 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_set_spare_ggtt()
358 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_set_spare_ggtt()
360 if (size && size < pf_get_min_spare_ggtt(gt)) in pf_set_spare_ggtt()
363 size = round_up(size, pf_get_ggtt_alignment(gt)); in pf_set_spare_ggtt()
364 gt->sriov.pf.spare.ggtt_size = size; in pf_set_spare_ggtt()
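The spare setters in this file (GGTT here; contexts, doorbells and LMEM below) share one validation shape: zero clears the override back to the built-in minimum, a nonzero value below that minimum is rejected, and sizes are rounded up to the hardware alignment where one applies. Condensed into a generic sketch:

static int example_set_spare(u64 *slot, u64 size, u64 min, u64 align)
{
	if (size && size < min)
		return -EINVAL;	/* nonzero but below the useful floor */

	*slot = round_up(size, align);	/* 0 stays 0: use the default */
	return 0;
}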
395 static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config) in pf_release_vf_config_ggtt() argument
397 pf_release_ggtt(gt_to_tile(gt), config->ggtt_region); in pf_release_vf_config_ggtt()
401 static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size) in pf_provision_vf_ggtt() argument
403 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_provision_vf_ggtt()
405 struct xe_tile *tile = gt_to_tile(gt); in pf_provision_vf_ggtt()
407 u64 alignment = pf_get_ggtt_alignment(gt); in pf_provision_vf_ggtt()
410 xe_gt_assert(gt, vfid); in pf_provision_vf_ggtt()
411 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in pf_provision_vf_ggtt()
412 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_provision_vf_ggtt()
421 pf_release_vf_config_ggtt(gt, config); in pf_provision_vf_ggtt()
423 xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region)); in pf_provision_vf_ggtt()
437 xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n", in pf_provision_vf_ggtt()
440 err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size); in pf_provision_vf_ggtt()
451 static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid) in pf_get_vf_config_ggtt() argument
453 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_get_vf_config_ggtt()
456 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in pf_get_vf_config_ggtt()
462 * @gt: the &xe_gt
469 u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid) in xe_gt_sriov_pf_config_get_ggtt() argument
473 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_ggtt()
475 size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid); in xe_gt_sriov_pf_config_get_ggtt()
477 size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt); in xe_gt_sriov_pf_config_get_ggtt()
478 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_ggtt()
483 static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value, in pf_config_set_u64_done() argument
493 xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n", in pf_config_set_u64_done()
496 xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n", in pf_config_set_u64_done()
503 xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n", in pf_config_set_u64_done()
510 * @gt: the &xe_gt (can't be media)
520 int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size) in xe_gt_sriov_pf_config_set_ggtt() argument
524 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in xe_gt_sriov_pf_config_set_ggtt()
526 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_ggtt()
528 err = pf_provision_vf_ggtt(gt, vfid, size); in xe_gt_sriov_pf_config_set_ggtt()
530 err = pf_set_spare_ggtt(gt, size); in xe_gt_sriov_pf_config_set_ggtt()
531 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_ggtt()
533 return pf_config_set_u64_done(gt, vfid, size, in xe_gt_sriov_pf_config_set_ggtt()
534 xe_gt_sriov_pf_config_get_ggtt(gt, vfid), in xe_gt_sriov_pf_config_set_ggtt()
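Every public setter follows the same sequence: take the master mutex, provision (vfid > 0) or adjust the spare (vfid == 0), drop the mutex, then report via the *_done() helper, which re-reads the value actually in effect so the log shows what stuck rather than what was asked for. A usage sketch (the VF number and size are illustrative):

/* Carve a 256M GGTT slice for VF1; must be called on the primary GT. */
int err = xe_gt_sriov_pf_config_set_ggtt(primary_gt, 1, SZ_256M);

if (err)
	return err;	/* the failure was already logged with details */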
538 static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs, in pf_config_bulk_set_u64_done() argument
544 xe_gt_assert(gt, first); in pf_config_bulk_set_u64_done()
545 xe_gt_assert(gt, num_vfs); in pf_config_bulk_set_u64_done()
546 xe_gt_assert(gt, first <= last); in pf_config_bulk_set_u64_done()
549 return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err); in pf_config_bulk_set_u64_done()
552 xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n", in pf_config_bulk_set_u64_done()
555 pf_config_bulk_set_u64_done(gt, first, last - first, value, in pf_config_bulk_set_u64_done()
557 return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err); in pf_config_bulk_set_u64_done()
561 value = get(gt, first); in pf_config_bulk_set_u64_done()
563 xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n", in pf_config_bulk_set_u64_done()
570 * @gt: the &xe_gt (can't be media)
579 int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_bulk_set_ggtt() argument
585 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_bulk_set_ggtt()
586 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in xe_gt_sriov_pf_config_bulk_set_ggtt()
591 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_bulk_set_ggtt()
593 err = pf_provision_vf_ggtt(gt, n, size); in xe_gt_sriov_pf_config_bulk_set_ggtt()
597 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_bulk_set_ggtt()
599 return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size, in xe_gt_sriov_pf_config_bulk_set_ggtt()
605 static u64 pf_get_max_ggtt(struct xe_gt *gt) in pf_get_max_ggtt() argument
607 struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt; in pf_get_max_ggtt()
608 u64 alignment = pf_get_ggtt_alignment(gt); in pf_get_max_ggtt()
609 u64 spare = pf_get_spare_ggtt(gt); in pf_get_max_ggtt()
614 xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n", in pf_get_max_ggtt()
619 static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs) in pf_estimate_fair_ggtt() argument
621 u64 available = pf_get_max_ggtt(gt); in pf_estimate_fair_ggtt()
622 u64 alignment = pf_get_ggtt_alignment(gt); in pf_estimate_fair_ggtt()
635 xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n", in pf_estimate_fair_ggtt()
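The fair estimate is an equal split of the largest usable GGTT hole, after the spare reservation, rounded down to the allocation alignment so each share stays placeable. A sketch of the arithmetic (mapping a zero result to an error is the caller's job, assumed below to be -ENOSPC):

u64 fair = div_u64(available, num_vfs);	/* equal share per VF */

fair = round_down(fair, alignment);	/* keep the share placeable */
if (!fair)
	return 0;	/* caller turns this into -ENOSPC (assumed) */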
642 * @gt: the &xe_gt (can't be media)
650 int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_set_fair_ggtt() argument
655 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_set_fair_ggtt()
656 xe_gt_assert(gt, num_vfs); in xe_gt_sriov_pf_config_set_fair_ggtt()
657 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in xe_gt_sriov_pf_config_set_fair_ggtt()
659 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_fair_ggtt()
660 fair = pf_estimate_fair_ggtt(gt, num_vfs); in xe_gt_sriov_pf_config_set_fair_ggtt()
661 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_fair_ggtt()
666 return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair); in xe_gt_sriov_pf_config_set_fair_ggtt()
669 static u32 pf_get_min_spare_ctxs(struct xe_gt *gt) in pf_get_min_spare_ctxs() argument
673 hweight64(gt->info.engine_mask) : SZ_256; in pf_get_min_spare_ctxs()
676 static u32 pf_get_spare_ctxs(struct xe_gt *gt) in pf_get_spare_ctxs() argument
680 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_get_spare_ctxs()
681 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_get_spare_ctxs()
683 spare = gt->sriov.pf.spare.num_ctxs; in pf_get_spare_ctxs()
684 spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt)); in pf_get_spare_ctxs()
689 static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare) in pf_set_spare_ctxs() argument
691 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_set_spare_ctxs()
692 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_set_spare_ctxs()
697 if (spare && spare < pf_get_min_spare_ctxs(gt)) in pf_set_spare_ctxs()
700 gt->sriov.pf.spare.num_ctxs = spare; in pf_set_spare_ctxs()
706 static int pf_reserve_ctxs(struct xe_gt *gt, u32 num) in pf_reserve_ctxs() argument
708 struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm; in pf_reserve_ctxs()
709 unsigned int spare = pf_get_spare_ctxs(gt); in pf_reserve_ctxs()
714 static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num) in pf_release_ctxs() argument
716 struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm; in pf_release_ctxs()
722 static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config) in pf_release_config_ctxs() argument
724 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_release_config_ctxs()
726 pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs); in pf_release_config_ctxs()
731 static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs) in pf_provision_vf_ctxs() argument
733 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_provision_vf_ctxs()
736 xe_gt_assert(gt, vfid); in pf_provision_vf_ctxs()
742 ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0); in pf_provision_vf_ctxs()
746 pf_release_config_ctxs(gt, config); in pf_provision_vf_ctxs()
752 ret = pf_reserve_ctxs(gt, num_ctxs); in pf_provision_vf_ctxs()
759 ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs); in pf_provision_vf_ctxs()
761 pf_release_config_ctxs(gt, config); in pf_provision_vf_ctxs()
765 xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n", in pf_provision_vf_ctxs()
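Context provisioning is revoke-then-replace: push an empty range so the GuC drops the old IDs, release them locally, then reserve a fresh contiguous range from the GuC ID manager and push it, rolling the reservation back if the push fails. A plausible reconstruction of the elided steps (pf_reserve_ctxs() returning the start ID on success is an assumption):

ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);	/* revoke in GuC first */
if (unlikely(ret))
	return ret;
pf_release_config_ctxs(gt, config);		/* then free the old IDs */

if (!num_ctxs)
	return 0;				/* plain unprovisioning */

ret = pf_reserve_ctxs(gt, num_ctxs);
if (unlikely(ret < 0))
	return ret;
config->begin_ctx = ret;			/* assumed: start ID */
config->num_ctxs = num_ctxs;

ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
if (unlikely(ret)) {
	pf_release_config_ctxs(gt, config);	/* roll back on failure */
	return ret;
}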
770 static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid) in pf_get_vf_config_ctxs() argument
772 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_get_vf_config_ctxs()
779 * @gt: the &xe_gt
787 u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid) in xe_gt_sriov_pf_config_get_ctxs() argument
791 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_ctxs()
793 num_ctxs = pf_get_vf_config_ctxs(gt, vfid); in xe_gt_sriov_pf_config_get_ctxs()
795 num_ctxs = pf_get_spare_ctxs(gt); in xe_gt_sriov_pf_config_get_ctxs()
796 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_ctxs()
811 static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual, in pf_config_set_u32_done() argument
819 xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n", in pf_config_set_u32_done()
821 xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n", in pf_config_set_u32_done()
827 xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n", in pf_config_set_u32_done()
834 * @gt: the &xe_gt
842 int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs) in xe_gt_sriov_pf_config_set_ctxs() argument
846 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_ctxs()
848 err = pf_provision_vf_ctxs(gt, vfid, num_ctxs); in xe_gt_sriov_pf_config_set_ctxs()
850 err = pf_set_spare_ctxs(gt, num_ctxs); in xe_gt_sriov_pf_config_set_ctxs()
851 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_ctxs()
853 return pf_config_set_u32_done(gt, vfid, num_ctxs, in xe_gt_sriov_pf_config_set_ctxs()
854 xe_gt_sriov_pf_config_get_ctxs(gt, vfid), in xe_gt_sriov_pf_config_set_ctxs()
858 static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs, in pf_config_bulk_set_u32_done() argument
863 xe_gt_assert(gt, first); in pf_config_bulk_set_u32_done()
864 xe_gt_assert(gt, num_vfs); in pf_config_bulk_set_u32_done()
865 xe_gt_assert(gt, first <= last); in pf_config_bulk_set_u32_done()
868 return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err); in pf_config_bulk_set_u32_done()
871 xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n", in pf_config_bulk_set_u32_done()
874 pf_config_bulk_set_u32_done(gt, first, last - first, value, in pf_config_bulk_set_u32_done()
876 return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err); in pf_config_bulk_set_u32_done()
880 value = get(gt, first); in pf_config_bulk_set_u32_done()
881 xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n", in pf_config_bulk_set_u32_done()
888 * @gt: the &xe_gt
897 int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_bulk_set_ctxs() argument
903 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_bulk_set_ctxs()
908 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_bulk_set_ctxs()
910 err = pf_provision_vf_ctxs(gt, n, num_ctxs); in xe_gt_sriov_pf_config_bulk_set_ctxs()
914 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_bulk_set_ctxs()
916 return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs, in xe_gt_sriov_pf_config_bulk_set_ctxs()
921 static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs) in pf_estimate_fair_ctxs() argument
923 struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm; in pf_estimate_fair_ctxs()
924 u32 spare = pf_get_spare_ctxs(gt); in pf_estimate_fair_ctxs()
936 xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair); in pf_estimate_fair_ctxs()
942 * @gt: the &xe_gt
950 int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_set_fair_ctxs() argument
955 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_set_fair_ctxs()
956 xe_gt_assert(gt, num_vfs); in xe_gt_sriov_pf_config_set_fair_ctxs()
958 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_fair_ctxs()
959 fair = pf_estimate_fair_ctxs(gt, num_vfs); in xe_gt_sriov_pf_config_set_fair_ctxs()
960 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_fair_ctxs()
965 return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair); in xe_gt_sriov_pf_config_set_fair_ctxs()
968 static u32 pf_get_min_spare_dbs(struct xe_gt *gt) in pf_get_min_spare_dbs() argument
974 static u32 pf_get_spare_dbs(struct xe_gt *gt) in pf_get_spare_dbs() argument
978 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_get_spare_dbs()
979 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_get_spare_dbs()
981 spare = gt->sriov.pf.spare.num_dbs; in pf_get_spare_dbs()
982 spare = max_t(u32, spare, pf_get_min_spare_dbs(gt)); in pf_get_spare_dbs()
987 static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare) in pf_set_spare_dbs() argument
989 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_set_spare_dbs()
990 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_set_spare_dbs()
995 if (spare && spare < pf_get_min_spare_dbs(gt)) in pf_set_spare_dbs()
998 gt->sriov.pf.spare.num_dbs = spare; in pf_set_spare_dbs()
1003 static int pf_reserve_dbs(struct xe_gt *gt, u32 num) in pf_reserve_dbs() argument
1005 struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm; in pf_reserve_dbs()
1006 unsigned int spare = pf_get_spare_dbs(gt); in pf_reserve_dbs()
1011 static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num) in pf_release_dbs() argument
1013 struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm; in pf_release_dbs()
1019 static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config) in pf_release_config_dbs() argument
1021 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_release_config_dbs()
1023 pf_release_dbs(gt, config->begin_db, config->num_dbs); in pf_release_config_dbs()
1028 static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs) in pf_provision_vf_dbs() argument
1030 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_provision_vf_dbs()
1033 xe_gt_assert(gt, vfid); in pf_provision_vf_dbs()
1039 ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0); in pf_provision_vf_dbs()
1043 pf_release_config_dbs(gt, config); in pf_provision_vf_dbs()
1049 ret = pf_reserve_dbs(gt, num_dbs); in pf_provision_vf_dbs()
1056 ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs); in pf_provision_vf_dbs()
1058 pf_release_config_dbs(gt, config); in pf_provision_vf_dbs()
1062 xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n", in pf_provision_vf_dbs()
1067 static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid) in pf_get_vf_config_dbs() argument
1069 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_get_vf_config_dbs()
1076 * @gt: the &xe_gt
1084 u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid) in xe_gt_sriov_pf_config_get_dbs() argument
1088 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_config_get_dbs()
1089 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); in xe_gt_sriov_pf_config_get_dbs()
1091 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_dbs()
1093 num_dbs = pf_get_vf_config_dbs(gt, vfid); in xe_gt_sriov_pf_config_get_dbs()
1095 num_dbs = pf_get_spare_dbs(gt); in xe_gt_sriov_pf_config_get_dbs()
1096 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_dbs()
1103 * @gt: the &xe_gt
1111 int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs) in xe_gt_sriov_pf_config_set_dbs() argument
1115 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_config_set_dbs()
1116 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); in xe_gt_sriov_pf_config_set_dbs()
1118 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_dbs()
1120 err = pf_provision_vf_dbs(gt, vfid, num_dbs); in xe_gt_sriov_pf_config_set_dbs()
1122 err = pf_set_spare_dbs(gt, num_dbs); in xe_gt_sriov_pf_config_set_dbs()
1123 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_dbs()
1125 return pf_config_set_u32_done(gt, vfid, num_dbs, in xe_gt_sriov_pf_config_set_dbs()
1126 xe_gt_sriov_pf_config_get_dbs(gt, vfid), in xe_gt_sriov_pf_config_set_dbs()
1132 * @gt: the &xe_gt
1141 int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_bulk_set_dbs() argument
1147 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_bulk_set_dbs()
1152 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_bulk_set_dbs()
1154 err = pf_provision_vf_dbs(gt, n, num_dbs); in xe_gt_sriov_pf_config_bulk_set_dbs()
1158 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_bulk_set_dbs()
1160 return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs, in xe_gt_sriov_pf_config_bulk_set_dbs()
1165 static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs) in pf_estimate_fair_dbs() argument
1167 struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm; in pf_estimate_fair_dbs()
1168 u32 spare = pf_get_spare_dbs(gt); in pf_estimate_fair_dbs()
1180 xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair); in pf_estimate_fair_dbs()
1186 * @gt: the &xe_gt
1194 int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_set_fair_dbs() argument
1199 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_set_fair_dbs()
1200 xe_gt_assert(gt, num_vfs); in xe_gt_sriov_pf_config_set_fair_dbs()
1202 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_fair_dbs()
1203 fair = pf_estimate_fair_dbs(gt, num_vfs); in xe_gt_sriov_pf_config_set_fair_dbs()
1204 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_fair_dbs()
1209 return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair); in xe_gt_sriov_pf_config_set_fair_dbs()
1212 static u64 pf_get_lmem_alignment(struct xe_gt *gt) in pf_get_lmem_alignment() argument
1218 static u64 pf_get_min_spare_lmem(struct xe_gt *gt) in pf_get_min_spare_lmem() argument
1224 static u64 pf_get_spare_lmem(struct xe_gt *gt) in pf_get_spare_lmem() argument
1228 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_get_spare_lmem()
1229 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_get_spare_lmem()
1231 spare = gt->sriov.pf.spare.lmem_size; in pf_get_spare_lmem()
1232 spare = max_t(u64, spare, pf_get_min_spare_lmem(gt)); in pf_get_spare_lmem()
1237 static int pf_set_spare_lmem(struct xe_gt *gt, u64 size) in pf_set_spare_lmem() argument
1239 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_set_spare_lmem()
1240 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_set_spare_lmem()
1242 if (size && size < pf_get_min_spare_lmem(gt)) in pf_set_spare_lmem()
1245 gt->sriov.pf.spare.lmem_size = size; in pf_set_spare_lmem()
1249 static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid) in pf_get_vf_config_lmem() argument
1251 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_get_vf_config_lmem()
1258 static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) in pf_distribute_config_lmem() argument
1260 struct xe_device *xe = gt_to_xe(gt); in pf_distribute_config_lmem()
1266 if (tile->primary_gt == gt) { in pf_distribute_config_lmem()
1267 err = pf_push_vf_cfg_lmem(gt, vfid, size); in pf_distribute_config_lmem()
1273 err = pf_push_vf_cfg_lmem(gt, vfid, lmem); in pf_distribute_config_lmem()
1307 struct xe_gt *gt; in pf_update_vf_lmtt() local
1332 for_each_gt(gt, xe, gtid) { in pf_update_vf_lmtt()
1333 if (xe_gt_is_media_type(gt)) in pf_update_vf_lmtt()
1336 config = pf_pick_vf_config(gt, vfid); in pf_update_vf_lmtt()
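pf_update_vf_lmtt() spans the whole device: it walks every GT, skips media GTs (LMEM is provisioned on primary GTs only), and rebuilds the VF's LMTT from each tile's LMEM buffer object. A sketch of the walk, with the actual LMTT insertion reduced to a size accumulation since those calls are elided in the listing:

u64 total = 0;

for_each_gt(gt, xe, gtid) {
	if (xe_gt_is_media_type(gt))
		continue;	/* no LMEM config on media GTs */

	config = pf_pick_vf_config(gt, vfid);
	if (config->lmem_obj)
		total += config->lmem_obj->size;	/* per-tile BO */
}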
1359 static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config) in pf_release_vf_config_lmem() argument
1361 xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt))); in pf_release_vf_config_lmem()
1362 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in pf_release_vf_config_lmem()
1363 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_release_vf_config_lmem()
1371 static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) in pf_provision_vf_lmem() argument
1373 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_provision_vf_lmem()
1374 struct xe_device *xe = gt_to_xe(gt); in pf_provision_vf_lmem()
1375 struct xe_tile *tile = gt_to_tile(gt); in pf_provision_vf_lmem()
1379 xe_gt_assert(gt, vfid); in pf_provision_vf_lmem()
1380 xe_gt_assert(gt, IS_DGFX(xe)); in pf_provision_vf_lmem()
1381 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in pf_provision_vf_lmem()
1383 size = round_up(size, pf_get_lmem_alignment(gt)); in pf_provision_vf_lmem()
1386 err = pf_distribute_config_lmem(gt, vfid, 0); in pf_provision_vf_lmem()
1391 pf_release_vf_config_lmem(gt, config); in pf_provision_vf_lmem()
1393 xe_gt_assert(gt, !config->lmem_obj); in pf_provision_vf_lmem()
1398 xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M); in pf_provision_vf_lmem()
1414 err = pf_push_vf_cfg_lmem(gt, vfid, bo->size); in pf_provision_vf_lmem()
1418 xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n", in pf_provision_vf_lmem()
1425 pf_release_vf_config_lmem(gt, config); in pf_provision_vf_lmem()
1431 * @gt: the &xe_gt
1438 u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid) in xe_gt_sriov_pf_config_get_lmem() argument
1442 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_lmem()
1444 size = pf_get_vf_config_lmem(gt, vfid); in xe_gt_sriov_pf_config_get_lmem()
1446 size = pf_get_spare_lmem(gt); in xe_gt_sriov_pf_config_get_lmem()
1447 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_lmem()
1454 * @gt: the &xe_gt (can't be media)
1460 int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) in xe_gt_sriov_pf_config_set_lmem() argument
1464 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_lmem()
1466 err = pf_provision_vf_lmem(gt, vfid, size); in xe_gt_sriov_pf_config_set_lmem()
1468 err = pf_set_spare_lmem(gt, size); in xe_gt_sriov_pf_config_set_lmem()
1469 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_lmem()
1471 return pf_config_set_u64_done(gt, vfid, size, in xe_gt_sriov_pf_config_set_lmem()
1472 xe_gt_sriov_pf_config_get_lmem(gt, vfid), in xe_gt_sriov_pf_config_set_lmem()
1478 * @gt: the &xe_gt (can't be media)
1487 int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_bulk_set_lmem() argument
1493 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_bulk_set_lmem()
1494 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in xe_gt_sriov_pf_config_bulk_set_lmem()
1499 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_bulk_set_lmem()
1501 err = pf_provision_vf_lmem(gt, n, size); in xe_gt_sriov_pf_config_bulk_set_lmem()
1505 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_bulk_set_lmem()
1507 return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size, in xe_gt_sriov_pf_config_bulk_set_lmem()
1512 static u64 pf_query_free_lmem(struct xe_gt *gt) in pf_query_free_lmem() argument
1514 struct xe_tile *tile = gt->tile; in pf_query_free_lmem()
1519 static u64 pf_query_max_lmem(struct xe_gt *gt) in pf_query_max_lmem() argument
1521 u64 alignment = pf_get_lmem_alignment(gt); in pf_query_max_lmem()
1522 u64 spare = pf_get_spare_lmem(gt); in pf_query_max_lmem()
1523 u64 free = pf_query_free_lmem(gt); in pf_query_max_lmem()
1539 static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs) in pf_estimate_fair_lmem() argument
1541 u64 available = pf_query_max_lmem(gt); in pf_estimate_fair_lmem()
1542 u64 alignment = pf_get_lmem_alignment(gt); in pf_estimate_fair_lmem()
1551 xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n", in pf_estimate_fair_lmem()
1558 * @gt: the &xe_gt (can't be media)
1566 int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_set_fair_lmem() argument
1571 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_set_fair_lmem()
1572 xe_gt_assert(gt, num_vfs); in xe_gt_sriov_pf_config_set_fair_lmem()
1573 xe_gt_assert(gt, !xe_gt_is_media_type(gt)); in xe_gt_sriov_pf_config_set_fair_lmem()
1575 if (!IS_DGFX(gt_to_xe(gt))) in xe_gt_sriov_pf_config_set_fair_lmem()
1578 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_fair_lmem()
1579 fair = pf_estimate_fair_lmem(gt, num_vfs); in xe_gt_sriov_pf_config_set_fair_lmem()
1580 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_fair_lmem()
1585 return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair); in xe_gt_sriov_pf_config_set_fair_lmem()
1590 * @gt: the &xe_gt
1598 int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_set_fair() argument
1604 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_set_fair()
1605 xe_gt_assert(gt, num_vfs); in xe_gt_sriov_pf_config_set_fair()
1607 if (!xe_gt_is_media_type(gt)) { in xe_gt_sriov_pf_config_set_fair()
1608 err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs); in xe_gt_sriov_pf_config_set_fair()
1610 err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs); in xe_gt_sriov_pf_config_set_fair()
1613 err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs); in xe_gt_sriov_pf_config_set_fair()
1615 err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs); in xe_gt_sriov_pf_config_set_fair()
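xe_gt_sriov_pf_config_set_fair() chains the per-resource fair setters, skipping GGTT and LMEM on media GTs, and stops at the first failure. A hedged usage sketch for auto-provisioning all VFs before enabling them (the surrounding enable flow is an assumption):

/* Hypothetical enable path: fair-share every resource on every GT. */
for_each_gt(gt, xe, gtid) {
	err = xe_gt_sriov_pf_config_set_fair(gt, 1, num_vfs);
	if (err)
		return err;	/* per-resource logs already emitted */
}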
1626 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid, in pf_provision_exec_quantum() argument
1629 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_provision_exec_quantum()
1632 err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum); in pf_provision_exec_quantum()
1640 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid) in pf_get_exec_quantum() argument
1642 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_get_exec_quantum()
1649 * @gt: the &xe_gt
1657 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_set_exec_quantum() argument
1662 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_exec_quantum()
1663 err = pf_provision_exec_quantum(gt, vfid, exec_quantum); in xe_gt_sriov_pf_config_set_exec_quantum()
1664 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_exec_quantum()
1666 return pf_config_set_u32_done(gt, vfid, exec_quantum, in xe_gt_sriov_pf_config_set_exec_quantum()
1667 xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid), in xe_gt_sriov_pf_config_set_exec_quantum()
1673 * @gt: the &xe_gt
1680 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid) in xe_gt_sriov_pf_config_get_exec_quantum() argument
1684 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_exec_quantum()
1685 exec_quantum = pf_get_exec_quantum(gt, vfid); in xe_gt_sriov_pf_config_get_exec_quantum()
1686 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_exec_quantum()
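Execution quantum and preemption timeout are plain u32 scheduling KLVs cached in the config after a successful push; note the pushers take the value by pointer, which suggests the helper may adjust it before it is stored (the reason is not visible in this listing). Usage sketch, with milliseconds assumed as the unit:

/* Give VF2 a 10 ms execution quantum; 0 is assumed to mean unlimited. */
err = xe_gt_sriov_pf_config_set_exec_quantum(gt, 2, 10);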
1696 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid, in pf_provision_preempt_timeout() argument
1699 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_provision_preempt_timeout()
1702 err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout); in pf_provision_preempt_timeout()
1711 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid) in pf_get_preempt_timeout() argument
1713 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_get_preempt_timeout()
1720 * @gt: the &xe_gt
1728 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_set_preempt_timeout() argument
1733 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_preempt_timeout()
1734 err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout); in xe_gt_sriov_pf_config_set_preempt_timeout()
1735 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_preempt_timeout()
1737 return pf_config_set_u32_done(gt, vfid, preempt_timeout, in xe_gt_sriov_pf_config_set_preempt_timeout()
1738 xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid), in xe_gt_sriov_pf_config_set_preempt_timeout()
1744 * @gt: the &xe_gt
1751 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid) in xe_gt_sriov_pf_config_get_preempt_timeout() argument
1755 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_preempt_timeout()
1756 preempt_timeout = pf_get_preempt_timeout(gt, vfid); in xe_gt_sriov_pf_config_get_preempt_timeout()
1757 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_preempt_timeout()
1762 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config) in pf_reset_config_sched() argument
1764 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_reset_config_sched()
1770 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid, in pf_provision_threshold() argument
1773 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_provision_threshold()
1776 err = pf_push_vf_cfg_threshold(gt, vfid, index, value); in pf_provision_threshold()
1785 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid, in pf_get_threshold() argument
1788 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_get_threshold()
1800 * @gt: the &xe_gt
1809 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_set_threshold() argument
1816 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_threshold()
1817 err = pf_provision_threshold(gt, vfid, index, value); in xe_gt_sriov_pf_config_set_threshold()
1818 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_set_threshold()
1820 return pf_config_set_u32_done(gt, vfid, value, in xe_gt_sriov_pf_config_set_threshold()
1821 xe_gt_sriov_pf_config_get_threshold(gt, vfid, index), in xe_gt_sriov_pf_config_set_threshold()
1827 * @gt: the &xe_gt
1835 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid, in xe_gt_sriov_pf_config_get_threshold() argument
1840 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_threshold()
1841 value = pf_get_threshold(gt, vfid, index); in xe_gt_sriov_pf_config_get_threshold()
1842 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_get_threshold()
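Thresholds are indexed GuC monitoring limits: pf_push_vf_cfg_threshold() maps the index to a KLV key, and the xe_gt_assert(gt, key) above catches indices with no mapping. Usage sketch (the index variable is hypothetical; value 0 is assumed to disable the threshold):

/* Hypothetical: disable one monitoring threshold for VF2. */
err = xe_gt_sriov_pf_config_set_threshold(gt, 2, index, 0);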
1847 static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config) in pf_reset_config_thresholds() argument
1849 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); in pf_reset_config_thresholds()
1859 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid) in pf_release_vf_config() argument
1861 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_release_vf_config()
1862 struct xe_device *xe = gt_to_xe(gt); in pf_release_vf_config()
1864 if (!xe_gt_is_media_type(gt)) { in pf_release_vf_config()
1865 pf_release_vf_config_ggtt(gt, config); in pf_release_vf_config()
1867 pf_release_vf_config_lmem(gt, config); in pf_release_vf_config()
1871 pf_release_config_ctxs(gt, config); in pf_release_vf_config()
1872 pf_release_config_dbs(gt, config); in pf_release_vf_config()
1873 pf_reset_config_sched(gt, config); in pf_release_vf_config()
1874 pf_reset_config_thresholds(gt, config); in pf_release_vf_config()
1879 * @gt: the &xe_gt
1887 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force) in xe_gt_sriov_pf_config_release() argument
1891 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_release()
1893 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_release()
1894 err = pf_send_vf_cfg_reset(gt, vfid); in xe_gt_sriov_pf_config_release()
1896 pf_release_vf_config(gt, vfid); in xe_gt_sriov_pf_config_release()
1897 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_release()
1900 xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n", in xe_gt_sriov_pf_config_release()
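xe_gt_sriov_pf_config_release() resets the GuC-side state first and frees the PF-side resources only if that succeeded, unless force is set, in which case the local release proceeds regardless (the trailing %s in the notice above likely marks the forced case). Usage sketch (the disable loop is an assumption):

/* Hypothetical disable path: force-release every VF's resources. */
for (n = 1; n <= total_vfs; n++)
	xe_gt_sriov_pf_config_release(gt, n, true);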
1943 static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout) in pf_sanitize_vf_resources() argument
1945 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); in pf_sanitize_vf_resources()
1946 struct xe_tile *tile = gt_to_tile(gt); in pf_sanitize_vf_resources()
1947 struct xe_device *xe = gt_to_xe(gt); in pf_sanitize_vf_resources()
1954 if (!xe_gt_is_media_type(gt)) { in pf_sanitize_vf_resources()
1965 * @gt: the &xe_gt
1973 int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout) in xe_gt_sriov_pf_config_sanitize() argument
1977 xe_gt_assert(gt, vfid != PFID); in xe_gt_sriov_pf_config_sanitize()
1979 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_sanitize()
1980 err = pf_sanitize_vf_resources(gt, vfid, timeout); in xe_gt_sriov_pf_config_sanitize()
1981 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_sanitize()
1984 xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n", in xe_gt_sriov_pf_config_sanitize()
1991 * @gt: the &xe_gt
1999 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh) in xe_gt_sriov_pf_config_push() argument
2003 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_push()
2005 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_push()
2007 err = pf_send_vf_cfg_reset(gt, vfid); in xe_gt_sriov_pf_config_push()
2009 err = pf_push_full_vf_config(gt, vfid); in xe_gt_sriov_pf_config_push()
2010 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_push()
2013 xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n", in xe_gt_sriov_pf_config_push()
2020 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid) in pf_validate_vf_config() argument
2022 struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt; in pf_validate_vf_config()
2023 struct xe_device *xe = gt_to_xe(gt); in pf_validate_vf_config()
2024 bool is_primary = !xe_gt_is_media_type(gt); in pf_validate_vf_config()
2029 valid_ctxs = pf_get_vf_config_ctxs(gt, vfid); in pf_validate_vf_config()
2030 valid_dbs = pf_get_vf_config_dbs(gt, vfid); in pf_validate_vf_config()
2036 /* and GGTT/LMEM is configured on primary GT only */ in pf_validate_vf_config()
2052 * @gt: the &xe_gt
2059 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid) in xe_gt_sriov_pf_config_is_empty() argument
2063 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_config_is_empty()
2064 xe_gt_assert(gt, vfid); in xe_gt_sriov_pf_config_is_empty()
2066 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_is_empty()
2067 empty = pf_validate_vf_config(gt, vfid) == -ENODATA; in xe_gt_sriov_pf_config_is_empty()
2068 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_is_empty()
2074 * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2075 * @gt: the &xe_gt
2077 * Any prior configurations pushed to GuC are lost when the GT is reset.
2082 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt) in xe_gt_sriov_pf_config_restart() argument
2084 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); in xe_gt_sriov_pf_config_restart()
2088 if (xe_gt_sriov_pf_config_is_empty(gt, n)) in xe_gt_sriov_pf_config_restart()
2090 else if (xe_gt_sriov_pf_config_push(gt, n, false)) in xe_gt_sriov_pf_config_restart()
2095 xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n", in xe_gt_sriov_pf_config_restart()
2099 xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n", in xe_gt_sriov_pf_config_restart()
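After a GT reset the GuC holds no VF configurations, so the restart path re-pushes every non-empty config and counts what it skipped or failed, feeding the two summary messages above. A sketch of that loop:

unsigned int skipped = 0, failed = 0;

for (n = 1; n <= total_vfs; n++) {
	if (xe_gt_sriov_pf_config_is_empty(gt, n))
		skipped++;		/* never provisioned: nothing to push */
	else if (xe_gt_sriov_pf_config_push(gt, n, false))
		failed++;		/* counted into the notice above */
}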
2105 * @gt: the &xe_gt
2113 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p) in xe_gt_sriov_pf_config_print_ggtt() argument
2115 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); in xe_gt_sriov_pf_config_print_ggtt()
2120 config = &gt->sriov.pf.vfs[n].config; in xe_gt_sriov_pf_config_print_ggtt()
2137 * @gt: the &xe_gt
2146 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p) in xe_gt_sriov_pf_config_print_ctxs() argument
2148 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); in xe_gt_sriov_pf_config_print_ctxs()
2151 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_config_print_ctxs()
2152 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_print_ctxs()
2155 config = &gt->sriov.pf.vfs[n].config; in xe_gt_sriov_pf_config_print_ctxs()
2166 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_print_ctxs()
2172 * @gt: the &xe_gt
2181 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p) in xe_gt_sriov_pf_config_print_dbs() argument
2183 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); in xe_gt_sriov_pf_config_print_dbs()
2186 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_config_print_dbs()
2187 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_print_dbs()
2190 config = &gt->sriov.pf.vfs[n].config; in xe_gt_sriov_pf_config_print_dbs()
2201 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_print_dbs()
2207 * @gt: the &xe_gt
2214 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p) in xe_gt_sriov_pf_config_print_available_ggtt() argument
2216 struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt; in xe_gt_sriov_pf_config_print_available_ggtt()
2217 u64 alignment = pf_get_ggtt_alignment(gt); in xe_gt_sriov_pf_config_print_available_ggtt()
2221 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_config_print_available_ggtt()
2223 mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_print_available_ggtt()
2225 spare = pf_get_spare_ggtt(gt); in xe_gt_sriov_pf_config_print_available_ggtt()
2228 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); in xe_gt_sriov_pf_config_print_available_ggtt()