xref: /linux/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c (revision 44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2024 Intel Corporation */
3 #include <linux/delay.h>
4 #include <linux/dev_printk.h>
5 #include <linux/kernel.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/types.h>
9 #include <asm/errno.h>
10 
11 #include "adf_accel_devices.h"
12 #include "adf_bank_state.h"
13 #include "adf_common_drv.h"
14 #include "adf_gen4_hw_data.h"
15 #include "adf_gen4_pfvf.h"
16 #include "adf_pfvf_utils.h"
17 #include "adf_mstate_mgr.h"
18 #include "adf_gen4_vf_mig.h"
19 
20 #define ADF_GEN4_VF_MSTATE_SIZE		4096
21 #define ADF_GEN4_PFVF_RSP_TIMEOUT_US	5000
22 
23 static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev);
24 static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len);
25 
26 static int adf_gen4_vfmig_init_device(struct qat_mig_dev *mdev)
27 {
28 	u8 *state;
29 
30 	state = kmalloc(ADF_GEN4_VF_MSTATE_SIZE, GFP_KERNEL);
31 	if (!state)
32 		return -ENOMEM;
33 
34 	mdev->state = state;
35 	mdev->state_size = ADF_GEN4_VF_MSTATE_SIZE;
36 	mdev->setup_size = 0;
37 	mdev->remote_setup_size = 0;
38 
39 	return 0;
40 }
41 
42 static void adf_gen4_vfmig_cleanup_device(struct qat_mig_dev *mdev)
43 {
44 	kfree(mdev->state);
45 	mdev->state = NULL;
46 }
47 
48 static void adf_gen4_vfmig_reset_device(struct qat_mig_dev *mdev)
49 {
50 	mdev->setup_size = 0;
51 	mdev->remote_setup_size = 0;
52 }
53 
54 static int adf_gen4_vfmig_open_device(struct qat_mig_dev *mdev)
55 {
56 	struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
57 	struct adf_accel_vf_info *vf_info;
58 	struct adf_gen4_vfmig *vfmig;
59 
60 	vf_info = &accel_dev->pf.vf_info[mdev->vf_id];
61 
62 	vfmig = kzalloc(sizeof(*vfmig), GFP_KERNEL);
63 	if (!vfmig)
64 		return -ENOMEM;
65 
66 	vfmig->mstate_mgr = adf_mstate_mgr_new(mdev->state, mdev->state_size);
67 	if (!vfmig->mstate_mgr) {
68 		kfree(vfmig);
69 		return -ENOMEM;
70 	}
71 	vf_info->mig_priv = vfmig;
72 	mdev->setup_size = 0;
73 	mdev->remote_setup_size = 0;
74 
75 	return 0;
76 }
77 
78 static void adf_gen4_vfmig_close_device(struct qat_mig_dev *mdev)
79 {
80 	struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
81 	struct adf_accel_vf_info *vf_info;
82 	struct adf_gen4_vfmig *vfmig;
83 
84 	vf_info = &accel_dev->pf.vf_info[mdev->vf_id];
85 	if (vf_info->mig_priv) {
86 		vfmig = vf_info->mig_priv;
87 		adf_mstate_mgr_destroy(vfmig->mstate_mgr);
88 		kfree(vfmig);
89 		vf_info->mig_priv = NULL;
90 	}
91 }
92 
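/*
 * Suspend drains every ring-pair bank owned by the VF and quiesces its
 * coalescing timer so that no in-flight requests remain before the bank
 * state is saved. Banks that were successfully drained are flagged in
 * bank_stopped[] so that resume only re-enables what suspend stopped.
 */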
93 static int adf_gen4_vfmig_suspend_device(struct qat_mig_dev *mdev)
94 {
95 	struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
96 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
97 	struct adf_accel_vf_info *vf_info;
98 	struct adf_gen4_vfmig *vf_mig;
99 	u32 vf_nr = mdev->vf_id;
100 	int ret, i;
101 
102 	vf_info = &accel_dev->pf.vf_info[vf_nr];
103 	vf_mig = vf_info->mig_priv;
104 
105 	/* Stop all inflight jobs */
106 	for (i = 0; i < hw_data->num_banks_per_vf; i++) {
107 		u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf;
108 
109 		ret = adf_gen4_bank_drain_start(accel_dev, pf_bank_nr,
110 						ADF_RPRESET_POLL_TIMEOUT_US);
111 		if (ret) {
112 			dev_err(&GET_DEV(accel_dev),
113 				"Failed to drain bank %d for vf_nr %d\n", i,
114 				vf_nr);
115 			return ret;
116 		}
117 		vf_mig->bank_stopped[i] = true;
118 
119 		adf_gen4_bank_quiesce_coal_timer(accel_dev, pf_bank_nr,
120 						 ADF_COALESCED_POLL_TIMEOUT_US);
121 	}
122 
123 	return 0;
124 }
125 
126 static int adf_gen4_vfmig_resume_device(struct qat_mig_dev *mdev)
127 {
128 	struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
129 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
130 	struct adf_accel_vf_info *vf_info;
131 	struct adf_gen4_vfmig *vf_mig;
132 	u32 vf_nr = mdev->vf_id;
133 	int i;
134 
135 	vf_info = &accel_dev->pf.vf_info[vf_nr];
136 	vf_mig = vf_info->mig_priv;
137 
138 	for (i = 0; i < hw_data->num_banks_per_vf; i++) {
139 		u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf;
140 
141 		if (vf_mig->bank_stopped[i]) {
142 			adf_gen4_bank_drain_finish(accel_dev, pf_bank_nr);
143 			vf_mig->bank_stopped[i] = false;
144 		}
145 	}
146 
147 	return 0;
148 }
149 
150 struct adf_vf_bank_info {
151 	struct adf_accel_dev *accel_dev;
152 	u32 vf_nr;
153 	u32 bank_nr;
154 };
155 
156 struct mig_user_sla {
157 	enum adf_base_services srv;
158 	u64 rp_mask;
159 	u32 cir;
160 	u32 pir;
161 };
162 
163 static int adf_mstate_sla_check(struct adf_mstate_mgr *sub_mgr, u8 *src_buf,
164 				u32 src_size, void *opaque)
165 {
166 	struct adf_mstate_vreginfo _sinfo = { src_buf, src_size };
167 	struct adf_mstate_vreginfo *sinfo = &_sinfo, *dinfo = opaque;
168 	u32 src_sla_cnt = sinfo->size / sizeof(struct mig_user_sla);
169 	u32 dst_sla_cnt = dinfo->size / sizeof(struct mig_user_sla);
170 	struct mig_user_sla *src_slas = sinfo->addr;
171 	struct mig_user_sla *dst_slas = dinfo->addr;
172 	int i, j;
173 
174 	for (i = 0; i < src_sla_cnt; i++) {
175 		for (j = 0; j < dst_sla_cnt; j++) {
176 			if (src_slas[i].srv != dst_slas[j].srv ||
177 			    src_slas[i].rp_mask != dst_slas[j].rp_mask)
178 				continue;
179 
180 			if (src_slas[i].cir > dst_slas[j].cir ||
181 			    src_slas[i].pir > dst_slas[j].pir) {
182 				pr_err("QAT: DST VF rate limiting mismatch.\n");
183 				return -EINVAL;
184 			}
185 			break;
186 		}
187 
188 		if (j == dst_sla_cnt) {
189 			pr_err("QAT: SRC VF rate limiting mismatch - SRC srv %d and rp_mask 0x%llx.\n",
190 			       src_slas[i].srv, src_slas[i].rp_mask);
191 			return -EINVAL;
192 		}
193 	}
194 
195 	return 0;
196 }
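/*
 * Illustrative example of the SLA compatibility rule above (values are
 * hypothetical): a source SLA {srv = SVC_ASYM, rp_mask = 0x3, cir = 100,
 * pir = 200} is accepted only if the destination holds an SLA with the
 * same srv and rp_mask whose cir >= 100 and pir >= 200; a lower
 * destination rate limit, or no matching destination SLA at all, fails
 * the load.
 */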
197 
198 static inline int adf_mstate_check_cap_size(u32 src_sz, u32 dst_sz, u32 max_sz)
199 {
200 	if (src_sz > max_sz || dst_sz > max_sz)
201 		return -EINVAL;
202 	else
203 		return 0;
204 }
205 
206 static int adf_mstate_compatver_check(struct adf_mstate_mgr *sub_mgr,
207 				      u8 *src_buf, u32 src_sz, void *opaque)
208 {
209 	struct adf_mstate_vreginfo *info = opaque;
210 	u8 compat = 0;
211 	u8 *pcompat;
212 
213 	if (src_sz != info->size) {
214 		pr_debug("QAT: State mismatch (compat version size), current %u, expected %u\n",
215 			 src_sz, info->size);
216 		return -EINVAL;
217 	}
218 
219 	memcpy(info->addr, src_buf, info->size);
220 	pcompat = info->addr;
221 	if (*pcompat == 0) {
222 		pr_warn("QAT: Unable to determine the version of VF\n");
223 		return 0;
224 	}
225 
226 	compat = adf_vf_compat_checker(*pcompat);
227 	if (compat == ADF_PF2VF_VF_INCOMPATIBLE) {
228 		pr_debug("QAT: SRC VF driver (ver=%u) is incompatible with DST PF driver (ver=%u)\n",
229 			 *pcompat, ADF_PFVF_COMPAT_THIS_VERSION);
230 		return -EINVAL;
231 	}
232 
233 	if (compat == ADF_PF2VF_VF_COMPAT_UNKNOWN)
234 		pr_debug("QAT: SRC VF driver (ver=%u) is newer than DST PF driver (ver=%u)\n",
235 			 *pcompat, ADF_PFVF_COMPAT_THIS_VERSION);
236 
237 	return 0;
238 }
239 
240 /*
241  * adf_mstate_capmask_compare() - compare QAT device capability mask
242  * @sinfo:	Pointer to source capability info
243  * @dinfo:	Pointer to target capability info
244  *
245  * This function compares the capability masks of the source VF and the target VF.
246  *
247  * Returns: 0 if target capability mask is identical to source capability mask,
248  * 1 if target mask can represent all the capabilities represented by source mask,
249  * -1 if target mask can't represent all the capabilities represented by source
250  * mask.
251  */
252 static int adf_mstate_capmask_compare(struct adf_mstate_vreginfo *sinfo,
253 				      struct adf_mstate_vreginfo *dinfo)
254 {
255 	u64 src = 0, dst = 0;
256 
257 	if (adf_mstate_check_cap_size(sinfo->size, dinfo->size, sizeof(u64))) {
258 		pr_debug("QAT: Unexpected capability size %u %u %zu\n",
259 			 sinfo->size, dinfo->size, sizeof(u64));
260 		return -1;
261 	}
262 
263 	memcpy(&src, sinfo->addr, sinfo->size);
264 	memcpy(&dst, dinfo->addr, dinfo->size);
265 
266 	pr_debug("QAT: Check cap compatibility of cap %llu %llu\n", src, dst);
267 
268 	if (src == dst)
269 		return 0;
270 
271 	if ((src | dst) == dst)
272 		return 1;
273 
274 	return -1;
275 }
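/*
 * Worked example of the comparison above: src = 0x3 and dst = 0x3 returns 0
 * (identical); src = 0x3 and dst = 0x7 returns 1 (every source capability is
 * also present in the destination, since src | dst == dst); src = 0x5 and
 * dst = 0x3 returns -1 (the destination lacks bit 2, so it cannot represent
 * the source capabilities).
 */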
276 
277 static int adf_mstate_capmask_superset(struct adf_mstate_mgr *sub_mgr, u8 *buf,
278 				       u32 size, void *opa)
279 {
280 	struct adf_mstate_vreginfo sinfo = { buf, size };
281 
282 	if (adf_mstate_capmask_compare(&sinfo, opa) >= 0)
283 		return 0;
284 
285 	return -EINVAL;
286 }
287 
288 static int adf_mstate_capmask_equal(struct adf_mstate_mgr *sub_mgr, u8 *buf,
289 				    u32 size, void *opa)
290 {
291 	struct adf_mstate_vreginfo sinfo = { buf, size };
292 
293 	if (adf_mstate_capmask_compare(&sinfo, opa) == 0)
294 		return 0;
295 
296 	return -EINVAL;
297 }
298 
299 static int adf_mstate_set_vreg(struct adf_mstate_mgr *sub_mgr, u8 *buf,
300 			       u32 size, void *opa)
301 {
302 	struct adf_mstate_vreginfo *info = opa;
303 
304 	if (size != info->size) {
305 		pr_debug("QAT: Unexpected cap size %u %u\n", size, info->size);
306 		return -EINVAL;
307 	}
308 	memcpy(info->addr, buf, info->size);
309 
310 	return 0;
311 }
312 
313 static u32 adf_gen4_vfmig_get_slas(struct adf_accel_dev *accel_dev, u32 vf_nr,
314 				   struct mig_user_sla *pmig_slas)
315 {
316 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
317 	struct adf_rl *rl_data = accel_dev->rate_limiting;
318 	struct rl_sla **sla_type_arr = NULL;
319 	u64 rp_mask, rp_index;
320 	u32 max_num_sla;
321 	u32 sla_cnt = 0;
322 	int i, j;
323 
324 	if (!accel_dev->rate_limiting)
325 		return 0;
326 
327 	rp_index = vf_nr * hw_data->num_banks_per_vf;
328 	max_num_sla = adf_rl_get_sla_arr_of_type(rl_data, RL_LEAF, &sla_type_arr);
329 
330 	for (i = 0; i < max_num_sla; i++) {
331 		if (!sla_type_arr[i])
332 			continue;
333 
334 		rp_mask = 0;
335 		for (j = 0; j < sla_type_arr[i]->ring_pairs_cnt; j++)
336 			rp_mask |= BIT(sla_type_arr[i]->ring_pairs_ids[j]);
337 
338 		if (rp_mask & GENMASK_ULL(rp_index + 3, rp_index)) {
339 			pmig_slas->rp_mask = rp_mask;
340 			pmig_slas->cir = sla_type_arr[i]->cir;
341 			pmig_slas->pir = sla_type_arr[i]->pir;
342 			pmig_slas->srv = sla_type_arr[i]->srv;
343 			pmig_slas++;
344 			sla_cnt++;
345 		}
346 	}
347 
348 	return sla_cnt;
349 }
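/*
 * Example of the ring-pair selection above, assuming the GEN4 layout of
 * four banks per VF: for vf_nr = 2, rp_index = 8 and the window
 * GENMASK_ULL(11, 8) covers ring pairs 8..11, so only leaf SLAs whose
 * rp_mask touches that VF's ring pairs are copied out for migration.
 */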
350 
351 static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr,
352 					u8 *state, u32 size, void *opa)
353 {
354 	struct adf_vf_bank_info *vf_bank_info = opa;
355 	struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
356 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
357 	u32 pf_bank_nr;
358 	int ret;
359 
360 	pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
361 	ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr,
362 					  (struct adf_bank_state *)state);
363 	if (ret) {
364 		dev_err(&GET_DEV(accel_dev),
365 			"Failed to load regs for vf%d bank%d\n",
366 			vf_bank_info->vf_nr, vf_bank_info->bank_nr);
367 		return ret;
368 	}
369 
370 	return 0;
371 }
372 
373 static int adf_gen4_vfmig_load_etr_bank(struct adf_accel_dev *accel_dev,
374 					u32 vf_nr, u32 bank_nr,
375 					struct adf_mstate_mgr *mstate_mgr)
376 {
377 	struct adf_vf_bank_info vf_bank_info = {accel_dev, vf_nr, bank_nr};
378 	struct adf_mstate_sect_h *subsec, *l2_subsec;
379 	struct adf_mstate_mgr sub_sects_mgr;
380 	char bank_ids[ADF_MSTATE_ID_LEN];
381 
382 	snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr);
383 	subsec = adf_mstate_sect_lookup(mstate_mgr, bank_ids, NULL, NULL);
384 	if (!subsec) {
385 		dev_err(&GET_DEV(accel_dev),
386 			"Failed to lookup sec %s for vf%d bank%d\n",
387 			ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr);
388 		return -EINVAL;
389 	}
390 
391 	adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
392 	l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS,
393 					   adf_gen4_vfmig_load_etr_regs,
394 					   &vf_bank_info);
395 	if (!l2_subsec) {
396 		dev_err(&GET_DEV(accel_dev),
397 			"Failed to add sec %s for vf%d bank%d\n",
398 			ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
399 		return -EINVAL;
400 	}
401 
402 	return 0;
403 }
404 
405 static int adf_gen4_vfmig_load_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
406 {
407 	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
408 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
409 	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
410 	struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
411 	struct adf_mstate_mgr sub_sects_mgr;
412 	struct adf_mstate_sect_h *subsec;
413 	int ret, i;
414 
415 	subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL,
416 					NULL);
417 	if (!subsec) {
418 		dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
419 			ADF_MSTATE_ETRB_IDS);
420 		return -EINVAL;
421 	}
422 
423 	adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
424 	for (i = 0; i < hw_data->num_banks_per_vf; i++) {
425 		ret = adf_gen4_vfmig_load_etr_bank(accel_dev, vf_nr, i,
426 						   &sub_sects_mgr);
427 		if (ret)
428 			return ret;
429 	}
430 
431 	return 0;
432 }
433 
434 static int adf_gen4_vfmig_load_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
435 {
436 	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
437 	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
438 	void __iomem *csr = adf_get_pmisc_base(accel_dev);
439 	struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
440 	struct adf_mstate_sect_h *subsec, *l2_subsec;
441 	struct adf_mstate_mgr sub_sects_mgr;
442 	struct {
443 		char *id;
444 		u64 ofs;
445 	} misc_states[] = {
446 		{ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
447 		{ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
448 		{ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
449 		{ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
450 	};
451 	int i;
452 
453 	subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL,
454 					NULL);
455 	if (!subsec) {
456 		dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
457 			ADF_MSTATE_MISCB_IDS);
458 		return -EINVAL;
459 	}
460 
461 	adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
462 	for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
463 		struct adf_mstate_vreginfo info;
464 		u32 regv;
465 
466 		info.addr = &regv;
467 		info.size = sizeof(regv);
468 		l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr,
469 						   misc_states[i].id,
470 						   adf_mstate_set_vreg,
471 						   &info);
472 		if (!l2_subsec) {
473 			dev_err(&GET_DEV(accel_dev),
474 				"Failed to load sec %s\n", misc_states[i].id);
475 			return -EINVAL;
476 		}
477 		ADF_CSR_WR(csr, misc_states[i].ofs, regv);
478 	}
479 
480 	return 0;
481 }
482 
483 static int adf_gen4_vfmig_load_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
484 {
485 	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
486 	struct mig_user_sla dst_slas[RL_RP_CNT_PER_LEAF_MAX] = { };
487 	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
488 	struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
489 	struct adf_mstate_sect_h *subsec, *l2_subsec;
490 	struct adf_mstate_mgr sub_sects_mgr;
491 	u32 dst_sla_cnt;
492 	struct {
493 		char *id;
494 		int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa);
495 		struct adf_mstate_vreginfo info;
496 	} gen_states[] = {
497 		{ADF_MSTATE_IOV_INIT_IDS, adf_mstate_set_vreg,
498 		{&vf_info->init, sizeof(vf_info->init)}},
499 		{ADF_MSTATE_COMPAT_VER_IDS, adf_mstate_compatver_check,
500 		{&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}},
501 		{ADF_MSTATE_SLA_IDS, adf_mstate_sla_check, {dst_slas, 0}},
502 	};
503 	int i;
504 
505 	subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL);
506 	if (!subsec) {
507 		dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
508 			ADF_MSTATE_GEN_IDS);
509 		return -EINVAL;
510 	}
511 
512 	adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
513 	for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
514 		if (gen_states[i].info.addr == dst_slas) {
515 			dst_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, dst_slas);
516 			gen_states[i].info.size = dst_sla_cnt * sizeof(struct mig_user_sla);
517 		}
518 
519 		l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr,
520 						   gen_states[i].id,
521 						   gen_states[i].action,
522 						   &gen_states[i].info);
523 		if (!l2_subsec) {
524 			dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
525 				gen_states[i].id);
526 			return -EINVAL;
527 		}
528 	}
529 
530 	return 0;
531 }
532 
533 static int adf_gen4_vfmig_load_config(struct adf_accel_dev *accel_dev, u32 vf_nr)
534 {
535 	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
536 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
537 	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
538 	struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
539 	struct adf_mstate_sect_h *subsec, *l2_subsec;
540 	struct adf_mstate_mgr sub_sects_mgr;
541 	struct {
542 		char *id;
543 		int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa);
544 		struct adf_mstate_vreginfo info;
545 	} setups[] = {
546 		{ADF_MSTATE_GEN_CAP_IDS, adf_mstate_capmask_superset,
547 		{&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
548 		{ADF_MSTATE_GEN_SVCMAP_IDS, adf_mstate_capmask_equal,
549 		{&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
550 		{ADF_MSTATE_GEN_EXTDC_IDS, adf_mstate_capmask_superset,
551 		{&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
552 	};
553 	int i;
554 
555 	subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL);
556 	if (!subsec) {
557 		dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
558 			ADF_MSTATE_CONFIG_IDS);
559 		return -EINVAL;
560 	}
561 
562 	adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
563 	for (i = 0; i < ARRAY_SIZE(setups); i++) {
564 		l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, setups[i].id,
565 						   setups[i].action, &setups[i].info);
566 		if (!l2_subsec) {
567 			dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
568 				setups[i].id);
569 			return -EINVAL;
570 		}
571 	}
572 
573 	return 0;
574 }
575 
576 static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
577 					u32 size, void *opa)
578 {
579 	struct adf_vf_bank_info *vf_bank_info = opa;
580 	struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
581 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
582 	u32 pf_bank_nr;
583 	int ret;
584 
585 	pf_bank_nr = vf_bank_info->bank_nr;
586 	pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
587 
588 	ret = hw_data->bank_state_save(accel_dev, pf_bank_nr,
589 				       (struct adf_bank_state *)state);
590 	if (ret) {
591 		dev_err(&GET_DEV(accel_dev),
592 			"Failed to save regs for vf%d bank%d\n",
593 			vf_bank_info->vf_nr, vf_bank_info->bank_nr);
594 		return ret;
595 	}
596 
597 	return sizeof(struct adf_bank_state);
598 }
599 
600 static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev,
601 					u32 vf_nr, u32 bank_nr,
602 					struct adf_mstate_mgr *mstate_mgr)
603 {
604 	struct adf_mstate_sect_h *subsec, *l2_subsec;
605 	struct adf_vf_bank_info vf_bank_info;
606 	struct adf_mstate_mgr sub_sects_mgr;
607 	char bank_ids[ADF_MSTATE_ID_LEN];
608 
609 	snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr);
610 
611 	subsec = adf_mstate_sect_add(mstate_mgr, bank_ids, NULL, NULL);
612 	if (!subsec) {
613 		dev_err(&GET_DEV(accel_dev),
614 			"Failed to add sec %s for vf%d bank%d\n",
615 			ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr);
616 		return -EINVAL;
617 	}
618 
619 	adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
620 	vf_bank_info.accel_dev = accel_dev;
621 	vf_bank_info.vf_nr = vf_nr;
622 	vf_bank_info.bank_nr = bank_nr;
623 	l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS,
624 					adf_gen4_vfmig_save_etr_regs,
625 					&vf_bank_info);
626 	if (!l2_subsec) {
627 		dev_err(&GET_DEV(accel_dev),
628 			"Failed to add sec %s for vf%d bank%d\n",
629 			ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
630 		return -EINVAL;
631 	}
632 	adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
633 
634 	return 0;
635 }
636 
637 static int adf_gen4_vfmig_save_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
638 {
639 	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
640 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
641 	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
642 	struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
643 	struct adf_mstate_mgr sub_sects_mgr;
644 	struct adf_mstate_sect_h *subsec;
645 	int ret, i;
646 
647 	subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, NULL);
648 	if (!subsec) {
649 		dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
650 			ADF_MSTATE_ETRB_IDS);
651 		return -EINVAL;
652 	}
653 
654 	adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
655 	for (i = 0; i < hw_data->num_banks_per_vf; i++) {
656 		ret = adf_gen4_vfmig_save_etr_bank(accel_dev, vf_nr, i,
657 						   &sub_sects_mgr);
658 		if (ret)
659 			return ret;
660 	}
661 	adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
662 
663 	return 0;
664 }
665 
666 static int adf_gen4_vfmig_save_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
667 {
668 	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
669 	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
670 	struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
671 	void __iomem *csr = adf_get_pmisc_base(accel_dev);
672 	struct adf_mstate_sect_h *subsec, *l2_subsec;
673 	struct adf_mstate_mgr sub_sects_mgr;
674 	struct {
675 		char *id;
676 		u64 offset;
677 	} misc_states[] = {
678 		{ADF_MSTATE_VINTSRC_IDS, ADF_GEN4_VINTSOU_OFFSET(vf_nr)},
679 		{ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
680 		{ADF_MSTATE_VINTSRC_PF2VM_IDS, ADF_GEN4_VINTSOUPF2VM_OFFSET(vf_nr)},
681 		{ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
682 		{ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
683 		{ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
684 	};
685 	ktime_t time_exp;
686 	int i;
687 
688 	subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL);
689 	if (!subsec) {
690 		dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
691 			ADF_MSTATE_MISCB_IDS);
692 		return -EINVAL;
693 	}
694 
695 	time_exp = ktime_add_us(ktime_get(), ADF_GEN4_PFVF_RSP_TIMEOUT_US);
696 	while (!mutex_trylock(&vf_info->pfvf_mig_lock)) {
697 		if (ktime_after(ktime_get(), time_exp)) {
698 			dev_err(&GET_DEV(accel_dev), "Failed to get pfvf mig lock\n");
699 			return -ETIMEDOUT;
700 		}
701 		usleep_range(500, 1000);
702 	}
703 
704 	adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
705 	for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
706 		struct adf_mstate_vreginfo info;
707 		u32 regv;
708 
709 		info.addr = &regv;
710 		info.size = sizeof(regv);
711 		regv = ADF_CSR_RD(csr, misc_states[i].offset);
712 
713 		l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr,
714 						     misc_states[i].id,
715 						     &info);
716 		if (!l2_subsec) {
717 			dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
718 				misc_states[i].id);
719 			mutex_unlock(&vf_info->pfvf_mig_lock);
720 			return -EINVAL;
721 		}
722 	}
723 
724 	mutex_unlock(&vf_info->pfvf_mig_lock);
725 	adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
726 
727 	return 0;
728 }
729 
730 static int adf_gen4_vfmig_save_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
731 {
732 	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
733 	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
734 	struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
735 	struct adf_mstate_mgr sub_sects_mgr;
736 	struct adf_mstate_sect_h *subsec, *l2_subsec;
737 	struct mig_user_sla src_slas[RL_RP_CNT_PER_LEAF_MAX] = { };
738 	u32 src_sla_cnt;
739 	struct {
740 		char *id;
741 		struct adf_mstate_vreginfo info;
742 	} gen_states[] = {
743 		{ADF_MSTATE_IOV_INIT_IDS,
744 		{&vf_info->init, sizeof(vf_info->init)}},
745 		{ADF_MSTATE_COMPAT_VER_IDS,
746 		{&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}},
747 		{ADF_MSTATE_SLA_IDS, {src_slas, 0}},
748 	};
749 	int i;
750 
751 	subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL);
752 	if (!subsec) {
753 		dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
754 			ADF_MSTATE_GEN_IDS);
755 		return -EINVAL;
756 	}
757 
758 	adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
759 	for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
760 		if (gen_states[i].info.addr == src_slas) {
761 			src_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, src_slas);
762 			gen_states[i].info.size = src_sla_cnt * sizeof(struct mig_user_sla);
763 		}
764 
765 		l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr,
766 						     gen_states[i].id,
767 						     &gen_states[i].info);
768 		if (!l2_subsec) {
769 			dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
770 				gen_states[i].id);
771 			return -EINVAL;
772 		}
773 	}
774 	adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
775 
776 	return 0;
777 }
778 
779 static int adf_gen4_vfmig_save_config(struct adf_accel_dev *accel_dev, u32 vf_nr)
780 {
781 	struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
782 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
783 	struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
784 	struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
785 	struct adf_mstate_mgr sub_sects_mgr;
786 	struct adf_mstate_sect_h *subsec, *l2_subsec;
787 	struct {
788 		char *id;
789 		struct adf_mstate_vreginfo info;
790 	} setups[] = {
791 		{ADF_MSTATE_GEN_CAP_IDS,
792 		{&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
793 		{ADF_MSTATE_GEN_SVCMAP_IDS,
794 		{&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
795 		{ADF_MSTATE_GEN_EXTDC_IDS,
796 		{&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
797 	};
798 	int i;
799 
800 	subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL);
801 	if (!subsec) {
802 		dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
803 			ADF_MSTATE_CONFIG_IDS);
804 		return -EINVAL;
805 	}
806 
807 	adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
808 	for (i = 0; i < ARRAY_SIZE(setups); i++) {
809 		l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, setups[i].id,
810 						     &setups[i].info);
811 		if (!l2_subsec) {
812 			dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
813 				setups[i].id);
814 			return -EINVAL;
815 		}
816 	}
817 	adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
818 
819 	return 0;
820 }
821 
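/*
 * The migration state buffer is laid out as two consecutive regions: the
 * setup region written by adf_gen4_vfmig_save_setup() (device config such
 * as capability masks and the ring-to-service map), followed by the runtime
 * state written below (generic, misc BAR and ETR BAR sections). Each region
 * starts with a preamble recording its size, which is what the loading side
 * uses to split the buffer again.
 */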
822 static int adf_gen4_vfmig_save_state(struct qat_mig_dev *mdev)
823 {
824 	struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
825 	struct adf_accel_vf_info *vf_info;
826 	struct adf_gen4_vfmig *vfmig;
827 	u32 vf_nr = mdev->vf_id;
828 	int ret;
829 
830 	vf_info = &accel_dev->pf.vf_info[vf_nr];
831 	vfmig = vf_info->mig_priv;
832 
833 	ret = adf_gen4_vfmig_save_setup(mdev);
834 	if (ret) {
835 		dev_err(&GET_DEV(accel_dev),
836 			"Failed to save setup for vf_nr %d\n", vf_nr);
837 		return ret;
838 	}
839 
840 	adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state + mdev->setup_size,
841 			    mdev->state_size - mdev->setup_size);
842 	if (!adf_mstate_preamble_add(vfmig->mstate_mgr))
843 		return -EINVAL;
844 
845 	ret = adf_gen4_vfmig_save_generic(accel_dev, vf_nr);
846 	if (ret) {
847 		dev_err(&GET_DEV(accel_dev),
848 			"Failed to save generic state for vf_nr %d\n", vf_nr);
849 		return ret;
850 	}
851 
852 	ret = adf_gen4_vfmig_save_misc(accel_dev, vf_nr);
853 	if (ret) {
854 		dev_err(&GET_DEV(accel_dev),
855 			"Failed to save misc bar state for vf_nr %d\n", vf_nr);
856 		return ret;
857 	}
858 
859 	ret = adf_gen4_vfmig_save_etr(accel_dev, vf_nr);
860 	if (ret) {
861 		dev_err(&GET_DEV(accel_dev),
862 			"Failed to save etr bar state for vf_nr %d\n", vf_nr);
863 		return ret;
864 	}
865 
866 	adf_mstate_preamble_update(vfmig->mstate_mgr);
867 
868 	return 0;
869 }
870 
871 static int adf_gen4_vfmig_load_state(struct qat_mig_dev *mdev)
872 {
873 	struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
874 	struct adf_accel_vf_info *vf_info;
875 	struct adf_gen4_vfmig *vfmig;
876 	u32 vf_nr = mdev->vf_id;
877 	int ret;
878 
879 	vf_info = &accel_dev->pf.vf_info[vf_nr];
880 	vfmig = vf_info->mig_priv;
881 
882 	ret = adf_gen4_vfmig_load_setup(mdev, mdev->state_size);
883 	if (ret) {
884 		dev_err(&GET_DEV(accel_dev), "Failed to load setup for vf_nr %d\n",
885 			vf_nr);
886 		return ret;
887 	}
888 
889 	ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr,
890 					      mdev->state + mdev->remote_setup_size,
891 					      mdev->state_size - mdev->remote_setup_size,
892 					      NULL, NULL);
893 	if (ret) {
894 		dev_err(&GET_DEV(accel_dev), "Invalid state for vf_nr %d\n",
895 			vf_nr);
896 		return ret;
897 	}
898 
899 	ret = adf_gen4_vfmig_load_generic(accel_dev, vf_nr);
900 	if (ret) {
901 		dev_err(&GET_DEV(accel_dev),
902 			"Failed to load general state for vf_nr %d\n", vf_nr);
903 		return ret;
904 	}
905 
906 	ret = adf_gen4_vfmig_load_misc(accel_dev, vf_nr);
907 	if (ret) {
908 		dev_err(&GET_DEV(accel_dev),
909 			"Failed to load misc bar state for vf_nr %d\n", vf_nr);
910 		return ret;
911 	}
912 
913 	ret = adf_gen4_vfmig_load_etr(accel_dev, vf_nr);
914 	if (ret) {
915 		dev_err(&GET_DEV(accel_dev),
916 			"Failed to load etr bar state for vf_nr %d\n", vf_nr);
917 		return ret;
918 	}
919 
920 	return 0;
921 }
922 
923 static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev)
924 {
925 	struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
926 	struct adf_accel_vf_info *vf_info;
927 	struct adf_gen4_vfmig *vfmig;
928 	u32 vf_nr = mdev->vf_id;
929 	int ret;
930 
931 	vf_info = &accel_dev->pf.vf_info[vf_nr];
932 	vfmig = vf_info->mig_priv;
933 
934 	if (mdev->setup_size)
935 		return 0;
936 
937 	adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size);
938 	if (!adf_mstate_preamble_add(vfmig->mstate_mgr))
939 		return -EINVAL;
940 
941 	ret = adf_gen4_vfmig_save_config(accel_dev, mdev->vf_id);
942 	if (ret)
943 		return ret;
944 
945 	adf_mstate_preamble_update(vfmig->mstate_mgr);
946 	mdev->setup_size = adf_mstate_state_size(vfmig->mstate_mgr);
947 
948 	return 0;
949 }
950 
951 static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len)
952 {
953 	struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
954 	struct adf_accel_vf_info *vf_info;
955 	struct adf_gen4_vfmig *vfmig;
956 	u32 vf_nr = mdev->vf_id;
957 	u32 setup_size;
958 	int ret;
959 
960 	vf_info = &accel_dev->pf.vf_info[vf_nr];
961 	vfmig = vf_info->mig_priv;
962 
963 	if (mdev->remote_setup_size)
964 		return 0;
965 
966 	if (len < sizeof(struct adf_mstate_preh))
967 		return -EAGAIN;
968 
969 	adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size);
970 	setup_size = adf_mstate_state_size_from_remote(vfmig->mstate_mgr);
971 	if (setup_size > mdev->state_size)
972 		return -EINVAL;
973 
974 	if (len < setup_size)
975 		return -EAGAIN;
976 
977 	ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, mdev->state,
978 					      setup_size, NULL, NULL);
979 	if (ret) {
980 		dev_err(&GET_DEV(accel_dev), "Invalid setup for vf_nr %d\n",
981 			vf_nr);
982 		return ret;
983 	}
984 
985 	mdev->remote_setup_size = setup_size;
986 
987 	ret = adf_gen4_vfmig_load_config(accel_dev, vf_nr);
988 	if (ret) {
989 		dev_err(&GET_DEV(accel_dev),
990 			"Failed to load config for vf_nr %d\n", vf_nr);
991 		return ret;
992 	}
993 
994 	return 0;
995 }
996 
997 void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops)
998 {
999 	vfmig_ops->init = adf_gen4_vfmig_init_device;
1000 	vfmig_ops->cleanup = adf_gen4_vfmig_cleanup_device;
1001 	vfmig_ops->reset = adf_gen4_vfmig_reset_device;
1002 	vfmig_ops->open = adf_gen4_vfmig_open_device;
1003 	vfmig_ops->close = adf_gen4_vfmig_close_device;
1004 	vfmig_ops->suspend = adf_gen4_vfmig_suspend_device;
1005 	vfmig_ops->resume = adf_gen4_vfmig_resume_device;
1006 	vfmig_ops->save_state = adf_gen4_vfmig_save_state;
1007 	vfmig_ops->load_state = adf_gen4_vfmig_load_state;
1008 	vfmig_ops->load_setup = adf_gen4_vfmig_load_setup;
1009 	vfmig_ops->save_setup = adf_gen4_vfmig_save_setup;
1010 }
1011 EXPORT_SYMBOL_GPL(adf_gen4_init_vf_mig_ops);
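/*
 * Minimal usage sketch (hypothetical GEN4 device init code; the function
 * and field names below are illustrative assumptions, not the actual
 * adf_4xxx implementation):
 *
 *	static void example_init_hw_data(struct adf_hw_device_data *hw_data)
 *	{
 *		...
 *		adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
 *	}
 *
 * The VFIO variant driver then invokes these callbacks through the
 * qat_mig_dev associated with each VF to drive the suspend/save and
 * load/resume migration flow.
 */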
1012