1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2024 Intel Corporation */
3 #include <linux/delay.h>
4 #include <linux/dev_printk.h>
5 #include <linux/kernel.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/types.h>
9 #include <asm/errno.h>
10
11 #include "adf_accel_devices.h"
12 #include "adf_common_drv.h"
13 #include "adf_gen4_hw_data.h"
14 #include "adf_gen4_pfvf.h"
15 #include "adf_pfvf_utils.h"
16 #include "adf_mstate_mgr.h"
17 #include "adf_gen4_vf_mig.h"
18
19 #define ADF_GEN4_VF_MSTATE_SIZE 4096
20 #define ADF_GEN4_PFVF_RSP_TIMEOUT_US 5000
21
22 static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev);
23 static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len);
24
adf_gen4_vfmig_init_device(struct qat_mig_dev * mdev)25 static int adf_gen4_vfmig_init_device(struct qat_mig_dev *mdev)
26 {
27 u8 *state;
28
29 state = kmalloc(ADF_GEN4_VF_MSTATE_SIZE, GFP_KERNEL);
30 if (!state)
31 return -ENOMEM;
32
33 mdev->state = state;
34 mdev->state_size = ADF_GEN4_VF_MSTATE_SIZE;
35 mdev->setup_size = 0;
36 mdev->remote_setup_size = 0;
37
38 return 0;
39 }
40
adf_gen4_vfmig_cleanup_device(struct qat_mig_dev * mdev)41 static void adf_gen4_vfmig_cleanup_device(struct qat_mig_dev *mdev)
42 {
43 kfree(mdev->state);
44 mdev->state = NULL;
45 }
46
adf_gen4_vfmig_reset_device(struct qat_mig_dev * mdev)47 static void adf_gen4_vfmig_reset_device(struct qat_mig_dev *mdev)
48 {
49 mdev->setup_size = 0;
50 mdev->remote_setup_size = 0;
51 }
52
adf_gen4_vfmig_open_device(struct qat_mig_dev * mdev)53 static int adf_gen4_vfmig_open_device(struct qat_mig_dev *mdev)
54 {
55 struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
56 struct adf_accel_vf_info *vf_info;
57 struct adf_gen4_vfmig *vfmig;
58
59 vf_info = &accel_dev->pf.vf_info[mdev->vf_id];
60
61 vfmig = kzalloc(sizeof(*vfmig), GFP_KERNEL);
62 if (!vfmig)
63 return -ENOMEM;
64
65 vfmig->mstate_mgr = adf_mstate_mgr_new(mdev->state, mdev->state_size);
66 if (!vfmig->mstate_mgr) {
67 kfree(vfmig);
68 return -ENOMEM;
69 }
70 vf_info->mig_priv = vfmig;
71 mdev->setup_size = 0;
72 mdev->remote_setup_size = 0;
73
74 return 0;
75 }
76
adf_gen4_vfmig_close_device(struct qat_mig_dev * mdev)77 static void adf_gen4_vfmig_close_device(struct qat_mig_dev *mdev)
78 {
79 struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
80 struct adf_accel_vf_info *vf_info;
81 struct adf_gen4_vfmig *vfmig;
82
83 vf_info = &accel_dev->pf.vf_info[mdev->vf_id];
84 if (vf_info->mig_priv) {
85 vfmig = vf_info->mig_priv;
86 adf_mstate_mgr_destroy(vfmig->mstate_mgr);
87 kfree(vfmig);
88 vf_info->mig_priv = NULL;
89 }
90 }
91
adf_gen4_vfmig_suspend_device(struct qat_mig_dev * mdev)92 static int adf_gen4_vfmig_suspend_device(struct qat_mig_dev *mdev)
93 {
94 struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
95 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
96 struct adf_accel_vf_info *vf_info;
97 struct adf_gen4_vfmig *vf_mig;
98 u32 vf_nr = mdev->vf_id;
99 int ret, i;
100
101 vf_info = &accel_dev->pf.vf_info[vf_nr];
102 vf_mig = vf_info->mig_priv;
103
104 /* Stop all inflight jobs */
105 for (i = 0; i < hw_data->num_banks_per_vf; i++) {
106 u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf;
107
108 ret = adf_gen4_bank_drain_start(accel_dev, pf_bank_nr,
109 ADF_RPRESET_POLL_TIMEOUT_US);
110 if (ret) {
111 dev_err(&GET_DEV(accel_dev),
112 "Failed to drain bank %d for vf_nr %d\n", i,
113 vf_nr);
114 return ret;
115 }
116 vf_mig->bank_stopped[i] = true;
117
118 adf_gen4_bank_quiesce_coal_timer(accel_dev, pf_bank_nr,
119 ADF_COALESCED_POLL_TIMEOUT_US);
120 }
121
122 return 0;
123 }
124
adf_gen4_vfmig_resume_device(struct qat_mig_dev * mdev)125 static int adf_gen4_vfmig_resume_device(struct qat_mig_dev *mdev)
126 {
127 struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
128 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
129 struct adf_accel_vf_info *vf_info;
130 struct adf_gen4_vfmig *vf_mig;
131 u32 vf_nr = mdev->vf_id;
132 int i;
133
134 vf_info = &accel_dev->pf.vf_info[vf_nr];
135 vf_mig = vf_info->mig_priv;
136
137 for (i = 0; i < hw_data->num_banks_per_vf; i++) {
138 u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf;
139
140 if (vf_mig->bank_stopped[i]) {
141 adf_gen4_bank_drain_finish(accel_dev, pf_bank_nr);
142 vf_mig->bank_stopped[i] = false;
143 }
144 }
145
146 return 0;
147 }
148
/*
 * Identifies one ring-pair bank of a given VF on a device; passed as the
 * opaque context to the per-bank ETR save/load callbacks.
 */
struct adf_vf_bank_info {
	struct adf_accel_dev *accel_dev;
	u32 vf_nr;	/* VF index on the parent device */
	u32 bank_nr;	/* bank index relative to the VF */
};
154
/*
 * Snapshot of one rate-limiting SLA carried in the migration state and
 * compared between SRC and DST in adf_mstate_sla_check().
 */
struct mig_user_sla {
	enum adf_base_services srv;	/* service the SLA applies to */
	u64 rp_mask;			/* mask of ring pairs the SLA covers */
	u32 cir;			/* committed rate; DST must be >= SRC */
	u32 pir;			/* peak rate; DST must be >= SRC */
};
161
/*
 * Verify that every SRC SLA has a DST SLA with the same service and ring
 * pair mask whose rate limits are at least as high. Returns 0 if all SRC
 * SLAs can be honoured on the DST, -EINVAL otherwise.
 */
static int adf_mstate_sla_check(struct adf_mstate_mgr *sub_mgr, u8 *src_buf,
				u32 src_size, void *opaque)
{
	struct adf_mstate_vreginfo *dinfo = opaque;
	struct mig_user_sla *src_slas = (struct mig_user_sla *)src_buf;
	struct mig_user_sla *dst_slas = dinfo->addr;
	u32 src_sla_cnt = src_size / sizeof(struct mig_user_sla);
	u32 dst_sla_cnt = dinfo->size / sizeof(struct mig_user_sla);
	int i, j;

	for (i = 0; i < src_sla_cnt; i++) {
		for (j = 0; j < dst_sla_cnt; j++) {
			if (src_slas[i].srv != dst_slas[j].srv ||
			    src_slas[i].rp_mask != dst_slas[j].rp_mask)
				continue;

			/* Matching DST SLA must not be more restrictive. */
			if (src_slas[i].cir > dst_slas[j].cir ||
			    src_slas[i].pir > dst_slas[j].pir) {
				pr_err("QAT: DST VF rate limiting mismatch.\n");
				return -EINVAL;
			}
			break;
		}

		/* No DST SLA matched this SRC SLA at all. */
		if (j == dst_sla_cnt) {
			pr_err("QAT: SRC VF rate limiting mismatch - SRC srv %d and rp_mask 0x%llx.\n",
			       src_slas[i].srv, src_slas[i].rp_mask);
			return -EINVAL;
		}
	}

	return 0;
}
196
/* Reject capability blobs on either side that exceed the storage limit. */
static inline int adf_mstate_check_cap_size(u32 src_sz, u32 dst_sz, u32 max_sz)
{
	return (src_sz <= max_sz && dst_sz <= max_sz) ? 0 : -EINVAL;
}
204
/*
 * Record the SRC VF's PFVF compat version and check it against this PF
 * driver. A zero version is accepted with a warning; an incompatible
 * version fails the load with -EINVAL.
 */
static int adf_mstate_compatver_check(struct adf_mstate_mgr *sub_mgr,
				      u8 *src_buf, u32 src_sz, void *opaque)
{
	struct adf_mstate_vreginfo *info = opaque;
	u8 *remote_ver;
	u8 compat;

	if (src_sz != info->size) {
		pr_debug("QAT: State mismatch (compat version size), current %u, expected %u\n",
			 src_sz, info->size);
		return -EINVAL;
	}

	memcpy(info->addr, src_buf, info->size);
	remote_ver = info->addr;
	if (!*remote_ver) {
		pr_warn("QAT: Unable to determine the version of VF\n");
		return 0;
	}

	compat = adf_vf_compat_checker(*remote_ver);
	switch (compat) {
	case ADF_PF2VF_VF_INCOMPATIBLE:
		pr_debug("QAT: SRC VF driver (ver=%u) is incompatible with DST PF driver (ver=%u)\n",
			 *remote_ver, ADF_PFVF_COMPAT_THIS_VERSION);
		return -EINVAL;
	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
		pr_debug("QAT: SRC VF driver (ver=%u) is newer than DST PF driver (ver=%u)\n",
			 *remote_ver, ADF_PFVF_COMPAT_THIS_VERSION);
		break;
	default:
		break;
	}

	return 0;
}
238
239 /*
240 * adf_mstate_capmask_compare() - compare QAT device capability mask
241 * @sinfo: Pointer to source capability info
242 * @dinfo: Pointer to target capability info
243 *
244 * This function compares the capability mask between source VF and target VF
245 *
246 * Returns: 0 if target capability mask is identical to source capability mask,
247 * 1 if target mask can represent all the capabilities represented by source mask,
248 * -1 if target mask can't represent all the capabilities represented by source
249 * mask.
250 */
adf_mstate_capmask_compare(struct adf_mstate_vreginfo * sinfo,struct adf_mstate_vreginfo * dinfo)251 static int adf_mstate_capmask_compare(struct adf_mstate_vreginfo *sinfo,
252 struct adf_mstate_vreginfo *dinfo)
253 {
254 u64 src = 0, dst = 0;
255
256 if (adf_mstate_check_cap_size(sinfo->size, dinfo->size, sizeof(u64))) {
257 pr_debug("QAT: Unexpected capability size %u %u %zu\n",
258 sinfo->size, dinfo->size, sizeof(u64));
259 return -1;
260 }
261
262 memcpy(&src, sinfo->addr, sinfo->size);
263 memcpy(&dst, dinfo->addr, dinfo->size);
264
265 pr_debug("QAT: Check cap compatibility of cap %llu %llu\n", src, dst);
266
267 if (src == dst)
268 return 0;
269
270 if ((src | dst) == dst)
271 return 1;
272
273 return -1;
274 }
275
/* Accept when the DST capability mask covers (or equals) the SRC mask. */
static int adf_mstate_capmask_superset(struct adf_mstate_mgr *sub_mgr, u8 *buf,
				       u32 size, void *opa)
{
	struct adf_mstate_vreginfo sinfo = { buf, size };

	return adf_mstate_capmask_compare(&sinfo, opa) < 0 ? -EINVAL : 0;
}
286
/* Accept only when the DST mask is identical to the SRC mask. */
static int adf_mstate_capmask_equal(struct adf_mstate_mgr *sub_mgr, u8 *buf,
				    u32 size, void *opa)
{
	struct adf_mstate_vreginfo sinfo = { buf, size };

	return adf_mstate_capmask_compare(&sinfo, opa) ? -EINVAL : 0;
}
297
/* Copy a saved virtual-register blob into the caller-provided storage. */
static int adf_mstate_set_vreg(struct adf_mstate_mgr *sub_mgr, u8 *buf,
			       u32 size, void *opa)
{
	struct adf_mstate_vreginfo *info = opa;

	if (size != info->size) {
		pr_debug("QAT: Unexpected cap size %u %u\n", size, info->size);
		return -EINVAL;
	}

	memcpy(info->addr, buf, info->size);

	return 0;
}
311
/*
 * Collect into @pmig_slas the leaf rate-limiting SLAs whose ring-pair mask
 * touches this VF's ring pairs. Returns the number of entries written;
 * 0 when rate limiting is not enabled on the device.
 */
static u32 adf_gen4_vfmig_get_slas(struct adf_accel_dev *accel_dev, u32 vf_nr,
				   struct mig_user_sla *pmig_slas)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla **sla_type_arr = NULL;
	struct mig_user_sla *out = pmig_slas;
	u64 rp_mask, rp_index;
	u32 max_num_sla;
	int i, j;

	if (!rl_data)
		return 0;

	rp_index = vf_nr * hw_data->num_banks_per_vf;
	max_num_sla = adf_rl_get_sla_arr_of_type(rl_data, RL_LEAF, &sla_type_arr);

	for (i = 0; i < max_num_sla; i++) {
		struct rl_sla *sla = sla_type_arr[i];

		if (!sla)
			continue;

		/* Build the mask of ring pairs this SLA applies to. */
		rp_mask = 0;
		for (j = 0; j < sla->ring_pairs_cnt; j++)
			rp_mask |= BIT(sla->ring_pairs_ids[j]);

		/* Keep only SLAs that overlap this VF's ring pairs. */
		if (rp_mask & GENMASK_ULL(rp_index + 3, rp_index)) {
			out->rp_mask = rp_mask;
			out->cir = sla->cir;
			out->pir = sla->pir;
			out->srv = sla->srv;
			out++;
		}
	}

	return out - pmig_slas;
}
349
/* Restore one bank's saved ring-pair CSR snapshot into the PF bank. */
static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr,
					u8 *state, u32 size, void *opa)
{
	struct adf_vf_bank_info *vf_bank_info = opa;
	struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 pf_bank_nr = vf_bank_info->vf_nr * hw_data->num_banks_per_vf +
			 vf_bank_info->bank_nr;
	int ret;

	ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr,
					  (struct bank_state *)state);
	if (ret)
		dev_err(&GET_DEV(accel_dev),
			"Failed to load regs for vf%d bank%d\n",
			vf_bank_info->vf_nr, vf_bank_info->bank_nr);

	return ret;
}
371
/* Locate one bank's subsection in the state and replay its ETR registers. */
static int adf_gen4_vfmig_load_etr_bank(struct adf_accel_dev *accel_dev,
					u32 vf_nr, u32 bank_nr,
					struct adf_mstate_mgr *mstate_mgr)
{
	struct adf_vf_bank_info vf_bank_info = {accel_dev, vf_nr, bank_nr};
	struct adf_mstate_sect_h *sect, *leaf;
	struct adf_mstate_mgr sec_mgr;
	char bank_ids[ADF_MSTATE_ID_LEN];

	snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr);

	sect = adf_mstate_sect_lookup(mstate_mgr, bank_ids, NULL, NULL);
	if (!sect) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to lookup sec %s for vf%d bank%d\n",
			ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr);
		return -EINVAL;
	}

	/* The restore callback is invoked on the register leaf section. */
	adf_mstate_mgr_init_from_psect(&sec_mgr, sect);
	leaf = adf_mstate_sect_lookup(&sec_mgr, ADF_MSTATE_ETR_REGS_IDS,
				      adf_gen4_vfmig_load_etr_regs,
				      &vf_bank_info);
	if (!leaf) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to add sec %s for vf%d bank%d\n",
			ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
		return -EINVAL;
	}

	return 0;
}
403
/* Restore all ring-pair banks of the VF from the ETR section. */
static int adf_gen4_vfmig_load_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
{
	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
	struct adf_mstate_sect_h *sect;
	struct adf_mstate_mgr sec_mgr;
	int ret, bank;

	sect = adf_mstate_sect_lookup(vfmig->mstate_mgr, ADF_MSTATE_ETRB_IDS,
				      NULL, NULL);
	if (!sect) {
		dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
			ADF_MSTATE_ETRB_IDS);
		return -EINVAL;
	}

	adf_mstate_mgr_init_from_psect(&sec_mgr, sect);
	for (bank = 0; bank < hw_data->num_banks_per_vf; bank++) {
		ret = adf_gen4_vfmig_load_etr_bank(accel_dev, vf_nr, bank,
						   &sec_mgr);
		if (ret)
			return ret;
	}

	return 0;
}
432
/* Write back the VF's saved PMISC registers (interrupt mask, mailboxes). */
static int adf_gen4_vfmig_load_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
{
	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
	void __iomem *csr = adf_get_pmisc_base(accel_dev);
	struct adf_mstate_sect_h *sect, *leaf;
	struct adf_mstate_mgr sec_mgr;
	struct {
		char *id;
		u64 ofs;
	} misc_states[] = {
		{ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
		{ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
		{ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
		{ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
	};
	int i;

	sect = adf_mstate_sect_lookup(vfmig->mstate_mgr, ADF_MSTATE_MISCB_IDS,
				      NULL, NULL);
	if (!sect) {
		dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
			ADF_MSTATE_MISCB_IDS);
		return -EINVAL;
	}

	adf_mstate_mgr_init_from_psect(&sec_mgr, sect);
	for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
		u32 val;
		struct adf_mstate_vreginfo vinfo = { &val, sizeof(val) };

		/* Pull the saved register value, then program the CSR. */
		leaf = adf_mstate_sect_lookup(&sec_mgr, misc_states[i].id,
					      adf_mstate_set_vreg, &vinfo);
		if (!leaf) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to load sec %s\n", misc_states[i].id);
			return -EINVAL;
		}
		ADF_CSR_WR(csr, misc_states[i].ofs, val);
	}

	return 0;
}
481
/*
 * Validate and apply the generic section: IOV init flag, SRC VF compat
 * version, and SRC SLAs checked against this (DST) device's SLAs.
 */
static int adf_gen4_vfmig_load_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
{
	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
	struct mig_user_sla dst_slas[RL_RP_CNT_PER_LEAF_MAX] = { };
	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
	struct adf_mstate_sect_h *sect, *leaf;
	struct adf_mstate_mgr sec_mgr;
	struct {
		char *id;
		int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa);
		struct adf_mstate_vreginfo info;
	} gen_states[] = {
		{ADF_MSTATE_IOV_INIT_IDS, adf_mstate_set_vreg,
		{&vf_info->init, sizeof(vf_info->init)}},
		{ADF_MSTATE_COMPAT_VER_IDS, adf_mstate_compatver_check,
		{&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}},
		{ADF_MSTATE_SLA_IDS, adf_mstate_sla_check, {dst_slas, 0}},
	};
	int i;

	sect = adf_mstate_sect_lookup(vfmig->mstate_mgr, ADF_MSTATE_GEN_IDS,
				      NULL, NULL);
	if (!sect) {
		dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
			ADF_MSTATE_GEN_IDS);
		return -EINVAL;
	}

	adf_mstate_mgr_init_from_psect(&sec_mgr, sect);
	for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
		/* The SLA entry is sized from this device's own SLAs. */
		if (gen_states[i].info.addr == dst_slas) {
			u32 cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr,
							  dst_slas);

			gen_states[i].info.size = cnt * sizeof(struct mig_user_sla);
		}

		leaf = adf_mstate_sect_lookup(&sec_mgr, gen_states[i].id,
					      gen_states[i].action,
					      &gen_states[i].info);
		if (!leaf) {
			dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
				gen_states[i].id);
			return -EINVAL;
		}
	}

	return 0;
}
531
/*
 * Check the SRC device config section against this device: capability mask
 * and extended DC capabilities must be a subset of ours; the ring-to-service
 * map must match exactly.
 */
static int adf_gen4_vfmig_load_config(struct adf_accel_dev *accel_dev, u32 vf_nr)
{
	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
	struct adf_mstate_sect_h *sect, *leaf;
	struct adf_mstate_mgr sec_mgr;
	struct {
		char *id;
		int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa);
		struct adf_mstate_vreginfo info;
	} setups[] = {
		{ADF_MSTATE_GEN_CAP_IDS, adf_mstate_capmask_superset,
		{&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
		{ADF_MSTATE_GEN_SVCMAP_IDS, adf_mstate_capmask_equal,
		{&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
		{ADF_MSTATE_GEN_EXTDC_IDS, adf_mstate_capmask_superset,
		{&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
	};
	int i;

	sect = adf_mstate_sect_lookup(vfmig->mstate_mgr, ADF_MSTATE_CONFIG_IDS,
				      NULL, NULL);
	if (!sect) {
		dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
			ADF_MSTATE_CONFIG_IDS);
		return -EINVAL;
	}

	adf_mstate_mgr_init_from_psect(&sec_mgr, sect);
	for (i = 0; i < ARRAY_SIZE(setups); i++) {
		leaf = adf_mstate_sect_lookup(&sec_mgr, setups[i].id,
					      setups[i].action, &setups[i].info);
		if (!leaf) {
			dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
				setups[i].id);
			return -EINVAL;
		}
	}

	return 0;
}
574
/* Snapshot one bank's ring-pair CSRs into the state buffer. */
static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
					u32 size, void *opa)
{
	struct adf_vf_bank_info *vf_bank_info = opa;
	struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 pf_bank_nr = vf_bank_info->vf_nr * hw_data->num_banks_per_vf +
			 vf_bank_info->bank_nr;
	int ret;

	ret = hw_data->bank_state_save(accel_dev, pf_bank_nr,
				       (struct bank_state *)state);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to save regs for vf%d bank%d\n",
			vf_bank_info->vf_nr, vf_bank_info->bank_nr);
		return ret;
	}

	/* On success the callback reports the number of bytes written. */
	return sizeof(struct bank_state);
}
598
/* Add one bank's indexed subsection and save its ETR registers into it. */
static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev,
					u32 vf_nr, u32 bank_nr,
					struct adf_mstate_mgr *mstate_mgr)
{
	struct adf_vf_bank_info vf_bank_info = {
		.accel_dev = accel_dev,
		.vf_nr = vf_nr,
		.bank_nr = bank_nr,
	};
	struct adf_mstate_sect_h *sect, *leaf;
	struct adf_mstate_mgr sec_mgr;
	char bank_ids[ADF_MSTATE_ID_LEN];

	snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr);

	sect = adf_mstate_sect_add(mstate_mgr, bank_ids, NULL, NULL);
	if (!sect) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to add sec %s for vf%d bank%d\n",
			ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr);
		return -EINVAL;
	}

	adf_mstate_mgr_init_from_parent(&sec_mgr, mstate_mgr);
	leaf = adf_mstate_sect_add(&sec_mgr, ADF_MSTATE_ETR_REGS_IDS,
				   adf_gen4_vfmig_save_etr_regs,
				   &vf_bank_info);
	if (!leaf) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to add sec %s for vf%d bank%d\n",
			ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
		return -EINVAL;
	}
	adf_mstate_sect_update(mstate_mgr, &sec_mgr, sect);

	return 0;
}
635
/* Save all ring-pair banks of the VF under the ETR section. */
static int adf_gen4_vfmig_save_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
{
	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
	struct adf_mstate_sect_h *sect;
	struct adf_mstate_mgr sec_mgr;
	int ret, bank;

	sect = adf_mstate_sect_add(vfmig->mstate_mgr, ADF_MSTATE_ETRB_IDS,
				   NULL, NULL);
	if (!sect) {
		dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
			ADF_MSTATE_ETRB_IDS);
		return -EINVAL;
	}

	adf_mstate_mgr_init_from_parent(&sec_mgr, vfmig->mstate_mgr);
	for (bank = 0; bank < hw_data->num_banks_per_vf; bank++) {
		ret = adf_gen4_vfmig_save_etr_bank(accel_dev, vf_nr, bank,
						   &sec_mgr);
		if (ret)
			return ret;
	}
	adf_mstate_sect_update(vfmig->mstate_mgr, &sec_mgr, sect);

	return 0;
}
664
/*
 * Save the VF's PMISC-BAR registers: interrupt source/mask and the
 * PF2VM/VM2PF message CSRs. The snapshot is serialized against PF<->VF
 * message handling via vf_info->pfvf_mig_lock.
 */
static int adf_gen4_vfmig_save_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
{
	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
	struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
	void __iomem *csr = adf_get_pmisc_base(accel_dev);
	struct adf_mstate_sect_h *subsec, *l2_subsec;
	struct adf_mstate_mgr sub_sects_mgr;
	struct {
		char *id;
		u64 offset;
	} misc_states[] = {
		{ADF_MSTATE_VINTSRC_IDS, ADF_GEN4_VINTSOU_OFFSET(vf_nr)},
		{ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
		{ADF_MSTATE_VINTSRC_PF2VM_IDS, ADF_GEN4_VINTSOUPF2VM_OFFSET(vf_nr)},
		{ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
		{ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
		{ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
	};
	ktime_t time_exp;
	int i;

	subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL);
	if (!subsec) {
		dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
			ADF_MSTATE_MISCB_IDS);
		return -EINVAL;
	}

	/*
	 * Poll for the lock instead of blocking, bounded by the PFVF
	 * response timeout, so a stuck holder cannot hang the save path.
	 */
	time_exp = ktime_add_us(ktime_get(), ADF_GEN4_PFVF_RSP_TIMEOUT_US);
	while (!mutex_trylock(&vf_info->pfvf_mig_lock)) {
		if (ktime_after(ktime_get(), time_exp)) {
			dev_err(&GET_DEV(accel_dev), "Failed to get pfvf mig lock\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 1000);
	}

	adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
	for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
		struct adf_mstate_vreginfo info;
		u32 regv;

		info.addr = &regv;
		info.size = sizeof(regv);
		/* Read the live CSR value under the lock, then record it. */
		regv = ADF_CSR_RD(csr, misc_states[i].offset);

		l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr,
						     misc_states[i].id,
						     &info);
		if (!l2_subsec) {
			dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
				misc_states[i].id);
			/* Drop the lock on every error exit taken after it. */
			mutex_unlock(&vf_info->pfvf_mig_lock);
			return -EINVAL;
		}
	}

	mutex_unlock(&vf_info->pfvf_mig_lock);
	adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);

	return 0;
}
728
/*
 * Save the generic section: IOV init flag, the VF's PFVF compat version,
 * and the rate-limiting SLAs that apply to this VF.
 */
static int adf_gen4_vfmig_save_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
{
	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
	struct mig_user_sla src_slas[RL_RP_CNT_PER_LEAF_MAX] = { };
	struct adf_mstate_sect_h *sect, *leaf;
	struct adf_mstate_mgr sec_mgr;
	struct {
		char *id;
		struct adf_mstate_vreginfo info;
	} gen_states[] = {
		{ADF_MSTATE_IOV_INIT_IDS,
		{&vf_info->init, sizeof(vf_info->init)}},
		{ADF_MSTATE_COMPAT_VER_IDS,
		{&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}},
		{ADF_MSTATE_SLA_IDS, {src_slas, 0}},
	};
	int i;

	sect = adf_mstate_sect_add(vfmig->mstate_mgr, ADF_MSTATE_GEN_IDS, NULL,
				   NULL);
	if (!sect) {
		dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
			ADF_MSTATE_GEN_IDS);
		return -EINVAL;
	}

	adf_mstate_mgr_init_from_parent(&sec_mgr, vfmig->mstate_mgr);
	for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
		/* The SLA entry is sized at save time from the live SLAs. */
		if (gen_states[i].info.addr == src_slas) {
			u32 cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr,
							  src_slas);

			gen_states[i].info.size = cnt * sizeof(struct mig_user_sla);
		}

		leaf = adf_mstate_sect_add_vreg(&sec_mgr, gen_states[i].id,
						&gen_states[i].info);
		if (!leaf) {
			dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
				gen_states[i].id);
			return -EINVAL;
		}
	}
	adf_mstate_sect_update(vfmig->mstate_mgr, &sec_mgr, sect);

	return 0;
}
777
/*
 * Save the device config section: capability mask, ring-to-service map and
 * extended DC capabilities — checked for compatibility on the DST side.
 */
static int adf_gen4_vfmig_save_config(struct adf_accel_dev *accel_dev, u32 vf_nr)
{
	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
	struct adf_mstate_sect_h *sect, *leaf;
	struct adf_mstate_mgr sec_mgr;
	struct {
		char *id;
		struct adf_mstate_vreginfo info;
	} setups[] = {
		{ADF_MSTATE_GEN_CAP_IDS,
		{&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
		{ADF_MSTATE_GEN_SVCMAP_IDS,
		{&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
		{ADF_MSTATE_GEN_EXTDC_IDS,
		{&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
	};
	int i;

	sect = adf_mstate_sect_add(vfmig->mstate_mgr, ADF_MSTATE_CONFIG_IDS,
				   NULL, NULL);
	if (!sect) {
		dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
			ADF_MSTATE_CONFIG_IDS);
		return -EINVAL;
	}

	adf_mstate_mgr_init_from_parent(&sec_mgr, vfmig->mstate_mgr);
	for (i = 0; i < ARRAY_SIZE(setups); i++) {
		leaf = adf_mstate_sect_add_vreg(&sec_mgr, setups[i].id,
						&setups[i].info);
		if (!leaf) {
			dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
				setups[i].id);
			return -EINVAL;
		}
	}
	adf_mstate_sect_update(vfmig->mstate_mgr, &sec_mgr, sect);

	return 0;
}
820
adf_gen4_vfmig_save_state(struct qat_mig_dev * mdev)821 static int adf_gen4_vfmig_save_state(struct qat_mig_dev *mdev)
822 {
823 struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
824 struct adf_accel_vf_info *vf_info;
825 struct adf_gen4_vfmig *vfmig;
826 u32 vf_nr = mdev->vf_id;
827 int ret;
828
829 vf_info = &accel_dev->pf.vf_info[vf_nr];
830 vfmig = vf_info->mig_priv;
831
832 ret = adf_gen4_vfmig_save_setup(mdev);
833 if (ret) {
834 dev_err(&GET_DEV(accel_dev),
835 "Failed to save setup for vf_nr %d\n", vf_nr);
836 return ret;
837 }
838
839 adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state + mdev->setup_size,
840 mdev->state_size - mdev->setup_size);
841 if (!adf_mstate_preamble_add(vfmig->mstate_mgr))
842 return -EINVAL;
843
844 ret = adf_gen4_vfmig_save_generic(accel_dev, vf_nr);
845 if (ret) {
846 dev_err(&GET_DEV(accel_dev),
847 "Failed to save generic state for vf_nr %d\n", vf_nr);
848 return ret;
849 }
850
851 ret = adf_gen4_vfmig_save_misc(accel_dev, vf_nr);
852 if (ret) {
853 dev_err(&GET_DEV(accel_dev),
854 "Failed to save misc bar state for vf_nr %d\n", vf_nr);
855 return ret;
856 }
857
858 ret = adf_gen4_vfmig_save_etr(accel_dev, vf_nr);
859 if (ret) {
860 dev_err(&GET_DEV(accel_dev),
861 "Failed to save etr bar state for vf_nr %d\n", vf_nr);
862 return ret;
863 }
864
865 adf_mstate_preamble_update(vfmig->mstate_mgr);
866
867 return 0;
868 }
869
adf_gen4_vfmig_load_state(struct qat_mig_dev * mdev)870 static int adf_gen4_vfmig_load_state(struct qat_mig_dev *mdev)
871 {
872 struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
873 struct adf_accel_vf_info *vf_info;
874 struct adf_gen4_vfmig *vfmig;
875 u32 vf_nr = mdev->vf_id;
876 int ret;
877
878 vf_info = &accel_dev->pf.vf_info[vf_nr];
879 vfmig = vf_info->mig_priv;
880
881 ret = adf_gen4_vfmig_load_setup(mdev, mdev->state_size);
882 if (ret) {
883 dev_err(&GET_DEV(accel_dev), "Failed to load setup for vf_nr %d\n",
884 vf_nr);
885 return ret;
886 }
887
888 ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr,
889 mdev->state + mdev->remote_setup_size,
890 mdev->state_size - mdev->remote_setup_size,
891 NULL, NULL);
892 if (ret) {
893 dev_err(&GET_DEV(accel_dev), "Invalid state for vf_nr %d\n",
894 vf_nr);
895 return ret;
896 }
897
898 ret = adf_gen4_vfmig_load_generic(accel_dev, vf_nr);
899 if (ret) {
900 dev_err(&GET_DEV(accel_dev),
901 "Failed to load general state for vf_nr %d\n", vf_nr);
902 return ret;
903 }
904
905 ret = adf_gen4_vfmig_load_misc(accel_dev, vf_nr);
906 if (ret) {
907 dev_err(&GET_DEV(accel_dev),
908 "Failed to load misc bar state for vf_nr %d\n", vf_nr);
909 return ret;
910 }
911
912 ret = adf_gen4_vfmig_load_etr(accel_dev, vf_nr);
913 if (ret) {
914 dev_err(&GET_DEV(accel_dev),
915 "Failed to load etr bar state for vf_nr %d\n", vf_nr);
916 return ret;
917 }
918
919 return 0;
920 }
921
adf_gen4_vfmig_save_setup(struct qat_mig_dev * mdev)922 static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev)
923 {
924 struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
925 struct adf_accel_vf_info *vf_info;
926 struct adf_gen4_vfmig *vfmig;
927 u32 vf_nr = mdev->vf_id;
928 int ret;
929
930 vf_info = &accel_dev->pf.vf_info[vf_nr];
931 vfmig = vf_info->mig_priv;
932
933 if (mdev->setup_size)
934 return 0;
935
936 adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size);
937 if (!adf_mstate_preamble_add(vfmig->mstate_mgr))
938 return -EINVAL;
939
940 ret = adf_gen4_vfmig_save_config(accel_dev, mdev->vf_id);
941 if (ret)
942 return ret;
943
944 adf_mstate_preamble_update(vfmig->mstate_mgr);
945 mdev->setup_size = adf_mstate_state_size(vfmig->mstate_mgr);
946
947 return 0;
948 }
949
/*
 * Parse the remote (SRC) setup region at the start of the incoming buffer
 * and validate its config section against this (DST) device.
 * Returns -EAGAIN while fewer than the required bytes have arrived, so
 * callers can feed the buffer incrementally.
 */
static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len)
{
	struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
	struct adf_accel_vf_info *vf_info;
	struct adf_gen4_vfmig *vfmig;
	u32 vf_nr = mdev->vf_id;
	u32 setup_size;
	int ret;

	vf_info = &accel_dev->pf.vf_info[vf_nr];
	vfmig = vf_info->mig_priv;

	/* The setup region is parsed only once per load session. */
	if (mdev->remote_setup_size)
		return 0;

	/* Need at least a full preamble header to learn the setup size. */
	if (len < sizeof(struct adf_mstate_preh))
		return -EAGAIN;

	adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size);
	setup_size = adf_mstate_state_size_from_remote(vfmig->mstate_mgr);
	if (setup_size > mdev->state_size)
		return -EINVAL;

	if (len < setup_size)
		return -EAGAIN;

	ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, mdev->state,
					      setup_size, NULL, NULL);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "Invalid setup for vf_nr %d\n",
			vf_nr);
		return ret;
	}

	/*
	 * NOTE(review): remote_setup_size is recorded before the config
	 * check below; if that check fails and this function is called
	 * again, the early return above would skip re-running it — confirm
	 * callers treat a load_setup failure as fatal for the session.
	 */
	mdev->remote_setup_size = setup_size;

	ret = adf_gen4_vfmig_load_config(accel_dev, vf_nr);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to load config for vf_nr %d\n", vf_nr);
		return ret;
	}

	return 0;
}
995
adf_gen4_init_vf_mig_ops(struct qat_migdev_ops * vfmig_ops)996 void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops)
997 {
998 vfmig_ops->init = adf_gen4_vfmig_init_device;
999 vfmig_ops->cleanup = adf_gen4_vfmig_cleanup_device;
1000 vfmig_ops->reset = adf_gen4_vfmig_reset_device;
1001 vfmig_ops->open = adf_gen4_vfmig_open_device;
1002 vfmig_ops->close = adf_gen4_vfmig_close_device;
1003 vfmig_ops->suspend = adf_gen4_vfmig_suspend_device;
1004 vfmig_ops->resume = adf_gen4_vfmig_resume_device;
1005 vfmig_ops->save_state = adf_gen4_vfmig_save_state;
1006 vfmig_ops->load_state = adf_gen4_vfmig_load_state;
1007 vfmig_ops->load_setup = adf_gen4_vfmig_load_setup;
1008 vfmig_ops->save_setup = adf_gen4_vfmig_save_setup;
1009 }
1010 EXPORT_SYMBOL_GPL(adf_gen4_init_vf_mig_ops);
1011