// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"

/*
 * Send the PF2GUC_UPDATE_VF_CFG action, pointing the GuC at a KLV config
 * buffer in GGTT (a zero address and size reset the VF configuration).
 *
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
{
	const u32 bytes = num_dwords * sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(bytes, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);

	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);

	xe_bo_unpin_map_no_vm(bo);

	return ret;
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if the
 *         reply was malformed, or any other negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	int ret;

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);

		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

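/*
 * The helpers below wrap pf_push_vf_cfg_klvs() for single-key updates:
 * each encodes one KLV header (key plus payload length in dwords)
 * followed by a 1- or 2-dword payload.
 */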
static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
{
	u32 n = 0;

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
		cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
		cfg[n++] = upper_32_bits(config->ggtt_region->base.start);

		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
		cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
		cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
	}

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config);

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
	cfg[n++] = config->begin_ctx;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
	cfg[n++] = config->begin_db;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(config->lmem_obj->size);
		cfg[n++] = upper_32_bits(config->lmem_obj->size);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

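	/*
	 * MAKE_XE_GUC_KLV_THRESHOLDS_SET() is an X-macro: it expands
	 * encode_threshold_config() once per defined threshold, emitting a
	 * key/value KLV pair for each.
	 */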
	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}

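/*
 * Encode the complete VF configuration as a single KLV stream and push it to
 * the GuC. For a media GT, the GGTT KLVs are taken from the primary GT's
 * config, as the media GT itself carries no GGTT provisioning.
 */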
static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	num_dwords = encode_config(cfg, config);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);

	kfree(cfg);
	return err;
}

static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
		 * is redundant, as PTE will be implicitly re-assigned to PF by
		 * the xe_ggtt_clear() called by xe_ggtt_node_remove() below.
		 */
		xe_ggtt_node_remove(node, false);
	} else {
		xe_ggtt_node_fini(node);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

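/*
 * (Re)provision a VF's GGTT range: any existing region is first unprovisioned
 * in GuC and released, then a fresh node is inserted, assigned to the VF and
 * its placement pushed to both GTs of the tile.
 */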
static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	pf_release_ggtt(tile, node);
	return err;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If &vfid represents the PF, then this function changes the PF's spare GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}

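/*
 * Report the outcome of a bulk u64 provisioning: on error, recurse once with
 * err == 0 to log the VFs that were provisioned successfully before the
 * failing VF 'last', then report the failure itself.
 */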
static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest contiguous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at the single largest GGTT
	 * region, as that will always be the best fit for the 1 VF case,
	 * and will most likely also nicely cover other cases where VFs are
	 * provisioned on a fresh and idle PF driver, without any stale GGTT
	 * allocations spread across the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents the PF, then the number of PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

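/* Unit-suffix callbacks used by pf_config_set_u32_done() message formatting. */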
static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

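/*
 * Find the largest per-VF context quota the GuC ID manager can still satisfy,
 * by trial-reserving fair * num_vfs IDs and releasing them again on success.
 */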
static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents the PF, then the number of PF's spare GuC doorbell IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

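/*
 * As with contexts, probe for the largest per-VF doorbell quota by
 * trial-reserving fair * num_vfs doorbells and releasing them on success.
 */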
static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? bo->size : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	/* TODO */
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

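/*
 * Rebuild the LMTT for a VF: sum the VF's LMEM across all tiles, prepare
 * pages of that total size in each tile's LMTT, then populate them from
 * every primary GT's lmem_obj at increasing offsets. On any failure all
 * LMTT pages for this VF are dropped again.
 */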
static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += bo->size;
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
	}
}

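/*
 * (Re)provision a VF's LMEM: release any previous allocation (including its
 * LMTT mapping), then pin a fresh VRAM buffer object, map it in the LMTT and
 * push the new size to the GuC.
 */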
static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_NEEDS_2M |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	config->lmem_obj = bo;

	err = pf_update_vf_lmtt(xe, vfid);
	if (unlikely(err))
		goto release;

	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, bo->size, bo->size / SZ_1M);
	return 0;

reset_lmtt:
	pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#else
#define MAX_FAIR_LMEM	SZ_2G	/* XXX: known issue with allocating BO over 2GiB */
#endif

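/*
 * Estimate a fair per-VF LMEM quota from the available VRAM, rounded down to
 * a power of two and capped at MAX_FAIR_LMEM.
 */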
static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!IS_DGFX(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}

/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}

exec_quantum_unit(u32 exec_quantum)1621 static const char *exec_quantum_unit(u32 exec_quantum)
1622 {
1623 	return exec_quantum ? "ms" : "(infinity)";
1624 }
1625 
pf_provision_exec_quantum(struct xe_gt * gt,unsigned int vfid,u32 exec_quantum)1626 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1627 				     u32 exec_quantum)
1628 {
1629 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1630 	int err;
1631 
1632 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1633 	if (unlikely(err))
1634 		return err;
1635 
1636 	config->exec_quantum = exec_quantum;
1637 	return 0;
1638 }
1639 
pf_get_exec_quantum(struct xe_gt * gt,unsigned int vfid)1640 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1641 {
1642 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1643 
1644 	return config->exec_quantum;
1645 }
1646 
1647 /**
1648  * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1649  * @gt: the &xe_gt
1650  * @vfid: the VF identifier
1651  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1652  *
1653  * This function can only be called on PF.
1654  *
1655  * Return: 0 on success or a negative error code on failure.
1656  */
xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt * gt,unsigned int vfid,u32 exec_quantum)1657 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1658 					   u32 exec_quantum)
1659 {
1660 	int err;
1661 
1662 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1663 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1664 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1665 
1666 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1667 				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1668 				      "execution quantum", exec_quantum_unit, err);
1669 }
1670 
1671 /**
1672  * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1673  * @gt: the &xe_gt
1674  * @vfid: the VF identifier
1675  *
1676  * This function can only be called on PF.
1677  *
1678  * Return: VF's (or PF's) execution quantum in milliseconds.
1679  */
xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt * gt,unsigned int vfid)1680 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1681 {
1682 	u32 exec_quantum;
1683 
1684 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1685 	exec_quantum = pf_get_exec_quantum(gt, vfid);
1686 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1687 
1688 	return exec_quantum;
1689 }
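
/*
 * Usage sketch (illustrative only): provisioning VF2 with a 10 ms
 * execution quantum and reading the value back; 0 would request an
 * infinite quantum:
 *
 *	err = xe_gt_sriov_pf_config_set_exec_quantum(gt, 2, 10);
 *	if (!err)
 *		eq = xe_gt_sriov_pf_config_get_exec_quantum(gt, 2);
 */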

static const char *preempt_timeout_unit(u32 preempt_timeout)
{
	return preempt_timeout ? "us" : "(infinity)";
}

static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					u32 preempt_timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
	if (unlikely(err))
		return err;

	config->preempt_timeout = preempt_timeout;

	return 0;
}

static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->preempt_timeout;
}

/**
 * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					      u32 preempt_timeout)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
				      "preemption timeout", preempt_timeout_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) preemption timeout in microseconds.
 */
u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	u32 preempt_timeout;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return preempt_timeout;
}
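
/*
 * Usage sketch (illustrative only): note the unit difference - the
 * execution quantum above is in milliseconds, while the preemption
 * timeout is in microseconds; 0 again means infinity:
 *
 *	err = xe_gt_sriov_pf_config_set_preempt_timeout(gt, 2, 20000);
 *	if (!err)
 *		pt = xe_gt_sriov_pf_config_get_preempt_timeout(gt, 2);
 */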

static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	config->exec_quantum = 0;
	config->preempt_timeout = 0;
}

static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
				  enum xe_guc_klv_threshold_index index, u32 value)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
	if (unlikely(err))
		return err;

	config->thresholds[index] = value;

	return 0;
}

static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
			    enum xe_guc_klv_threshold_index index)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->thresholds[index];
}

static const char *threshold_unit(u32 threshold)
{
	return threshold ? "" : "(disabled)";
}

/**
 * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 * @value: requested value (0 means disabled)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);
	const char *name = xe_guc_klv_key_to_string(key);
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_threshold(gt, vfid, index, value);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, value,
				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
				      name, threshold_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 *
 * This function can only be called on PF.
 *
 * Return: value of VF's (or PF's) threshold.
 */
u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index)
{
	u32 value;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	value = pf_get_threshold(gt, vfid, index);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return value;
}
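
/*
 * Usage sketch (illustrative only): threshold indices come from
 * &enum xe_guc_klv_threshold_index, generated from the set in
 * xe_guc_klv_thresholds_set.h; writing 0 disables a threshold:
 *
 *	err = xe_gt_sriov_pf_config_set_threshold(gt, vfid, index, 0);
 */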

static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

#define reset_threshold_config(TAG, ...) ({				\
	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
#undef reset_threshold_config
}
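
/*
 * For reference (illustrative expansion, with FOO standing in for any
 * tag from the thresholds set): the X-macro above invokes
 * reset_threshold_config() once per defined threshold, roughly as:
 *
 *	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(FOO)] = 0;
 */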

static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe_gt_is_media_type(gt)) {
		pf_release_vf_config_ggtt(gt, config);
		if (IS_DGFX(xe)) {
			pf_release_vf_config_lmem(gt, config);
			pf_update_vf_lmtt(xe, vfid);
		}
	}
	pf_release_config_ctxs(gt, config);
	pf_release_config_dbs(gt, config);
	pf_reset_config_sched(gt, config);
	pf_reset_config_thresholds(gt, config);
}

/**
 * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @force: force configuration release
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
{
	int err;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err || force)
		pf_release_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
				   vfid, ERR_PTR(err),
				   force ? " but all resources were released anyway!" : "");
	}

	return force ? 0 : err;
}
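
/*
 * Usage sketch (illustrative only): when disabling VFs the PF can force
 * the release, so local resources are freed even if the GuC reset
 * request fails (the error is then reported but not propagated):
 *
 *	err = xe_gt_sriov_pf_config_release(gt, vfid, true);
 */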

static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
{
	if (xe_ggtt_node_allocated(ggtt_region))
		xe_ggtt_assign(ggtt_region, vfid);
}

static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
{
	struct xe_migrate *m = tile->migrate;
	struct dma_fence *fence;
	int err;

	if (!bo)
		return 0;

	xe_bo_lock(bo, false);
	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
	} else if (!fence) {
		err = -ENOMEM;
	} else {
		long ret = dma_fence_wait_timeout(fence, false, timeout);

		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
		dma_fence_put(fence);
		if (!err)
			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
						jiffies_to_msecs(timeout - ret));
	}
	xe_bo_unlock(bo);

	return err;
}

static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	int err = 0;

	/*
	 * Only GGTT and LMEM need to be cleared by the PF.
	 * GuC doorbell IDs and context IDs do not need any clearing.
	 */
	if (!xe_gt_is_media_type(gt)) {
		pf_sanitize_ggtt(config->ggtt_region, vfid);
		if (IS_DGFX(xe))
			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
	}

	return err;
}

/**
 * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @timeout: maximum timeout to wait for completion in jiffies
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
{
	int err;

	xe_gt_assert(gt, vfid != PFID);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_sanitize_vf_resources(gt, vfid, timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err))
		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
				   vfid, ERR_PTR(err));
	return err;
}
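
/*
 * Usage sketch (illustrative only): @timeout is expressed in jiffies,
 * so a caller willing to wait up to five seconds for the LMEM clear
 * could use:
 *
 *	err = xe_gt_sriov_pf_config_sanitize(gt, vfid, 5 * HZ);
 */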

/**
 * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @refresh: explicit refresh (reset the configuration known to GuC before pushing it again)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
{
	int err = 0;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (refresh)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
	}

	return err;
}
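
/*
 * Usage sketch (illustrative only): with @refresh set, the VF's
 * configuration in GuC is reset first and then pushed anew:
 *
 *	err = xe_gt_sriov_pf_config_push(gt, vfid, true);
 */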

static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool is_primary = !xe_gt_is_media_type(gt);
	bool valid_ggtt, valid_ctxs, valid_dbs;
	bool valid_any, valid_all;

	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	valid_dbs = pf_get_vf_config_dbs(gt, vfid);

	/* note that GuC doorbells are optional */
	valid_any = valid_ctxs || valid_dbs;
	valid_all = valid_ctxs;

	/* and GGTT/LMEM is configured on primary GT only */
	valid_all = valid_all && valid_ggtt;
	valid_any = valid_any || (valid_ggtt && is_primary);

	if (IS_DGFX(xe)) {
		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);

		valid_any = valid_any || (valid_lmem && is_primary);
		valid_all = valid_all && valid_lmem;
	}

	return valid_all ? 1 : valid_any ? -ENOKEY : -ENODATA;
}
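
/*
 * For reference: the tri-state result above distinguishes a fully
 * provisioned VF (1), a partially provisioned one (-ENOKEY), and one
 * with no mandatory resources at all (-ENODATA), which is exactly the
 * distinction xe_gt_sriov_pf_config_is_empty() below relies on.
 */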

/**
 * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 *
 * This function can only be called on PF.
 *
 * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
 */
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
{
	bool empty;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return empty;
}

/**
 * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
 * @gt: the &xe_gt
 *
 * Any configurations previously pushed to GuC are lost when the GT is reset.
 * Push all non-empty VF configurations to the GuC again.
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	unsigned int fail = 0, skip = 0;

	for (n = 1; n <= total_vfs; n++) {
		if (xe_gt_sriov_pf_config_is_empty(gt, n))
			skip++;
		else if (xe_gt_sriov_pf_config_push(gt, n, false))
			fail++;
	}

	if (fail)
		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
				   fail, total_vfs - skip, str_plural(total_vfs));

	if (fail != total_vfs)
		xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
}

/**
 * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT configuration data for all VFs.
 * VFs without provisioned GGTT are ignored.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!xe_ggtt_node_allocated(config->ggtt_region))
			continue;

		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
				buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
			   n, config->ggtt_region->base.start,
			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
			   buf);
	}

	return 0;
}
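
/*
 * Usage sketch (illustrative only): these print helpers are meant to be
 * driven through a &drm_printer, e.g. from a seq_file based debugfs
 * attribute:
 *
 *	struct drm_printer p = drm_seq_file_printer(m);
 *
 *	xe_gt_sriov_pf_config_print_ggtt(gt, &p);
 */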

/**
 * xe_gt_sriov_pf_config_print_ctxs - Print GuC context ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC context ID allocations across all VFs.
 * VFs without GuC context IDs are skipped.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_ctxs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_ctx,
			   config->begin_ctx + config->num_ctxs - 1,
			   config->num_ctxs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC doorbell ID allocations across all VFs.
 * VFs without GuC doorbell IDs are skipped.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_dbs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_db,
			   config->begin_db + config->num_dbs - 1,
			   config->num_dbs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are still available for provisioning.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare, avail, total;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	spare = pf_get_spare_ggtt(gt);
	total = xe_ggtt_print_holes(ggtt, alignment, p);

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);

	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);

	avail = total > spare ? total - spare : 0;

	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);

	return 0;
}
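
/*
 * For reference, the summary printed above has this shape (values here
 * are made up):
 *
 *	total:	4294967296	(4.00 GiB)
 *	spare:	268435456	(256 MiB)
 *	avail:	4026531840	(3.75 GiB)
 */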