xref: /linux/drivers/crypto/intel/qat/qat_common/adf_rl.c (revision 44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2023 Intel Corporation */
3 
4 #define dev_fmt(fmt) "RateLimiting: " fmt
5 
6 #include <asm/errno.h>
7 #include <asm/div64.h>
8 
9 #include <linux/dev_printk.h>
10 #include <linux/kernel.h>
11 #include <linux/pci.h>
12 #include <linux/slab.h>
13 #include <linux/units.h>
14 
15 #include "adf_accel_devices.h"
16 #include "adf_cfg_services.h"
17 #include "adf_common_drv.h"
18 #include "adf_rl_admin.h"
19 #include "adf_rl.h"
20 #include "adf_sysfs_rl.h"
21 
22 #define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET	0U
23 #define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET	0U
24 #define RL_TOKEN_PCIE_SIZE			64
25 #define RL_TOKEN_ASYM_SIZE			1024
26 #define RL_CSR_SIZE				4U
27 #define RL_CAPABILITY_MASK			GENMASK(6, 4)
28 #define RL_CAPABILITY_VALUE			0x70
29 #define RL_VALIDATE_NON_ZERO(input)		((input) == 0)
30 #define ROOT_MASK				GENMASK(1, 0)
31 #define CLUSTER_MASK				GENMASK(3, 0)
32 #define LEAF_MASK				GENMASK(5, 0)
33 
34 static int validate_user_input(struct adf_accel_dev *accel_dev,
35 			       struct adf_rl_sla_input_data *sla_in,
36 			       bool is_update)
37 {
38 	const unsigned long rp_mask = sla_in->rp_mask;
39 	size_t rp_mask_size;
40 	int i, cnt;
41 
42 	if (sla_in->pir < sla_in->cir) {
43 		dev_notice(&GET_DEV(accel_dev),
44 			   "PIR must be >= CIR, setting PIR to CIR\n");
45 		sla_in->pir = sla_in->cir;
46 	}
47 
48 	if (!is_update) {
49 		cnt = 0;
50 		rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE;
51 		for_each_set_bit(i, &rp_mask, rp_mask_size) {
52 			if (++cnt > RL_RP_CNT_PER_LEAF_MAX) {
53 				dev_notice(&GET_DEV(accel_dev),
54 					   "Too many ring pairs selected for this SLA\n");
55 				return -EINVAL;
56 			}
57 		}
58 
59 		if (sla_in->srv >= SVC_BASE_COUNT) {
60 			dev_notice(&GET_DEV(accel_dev),
61 				   "Wrong service type\n");
62 			return -EINVAL;
63 		}
64 
65 		if (sla_in->type > RL_LEAF) {
66 			dev_notice(&GET_DEV(accel_dev),
67 				   "Wrong node type\n");
68 			return -EINVAL;
69 		}
70 
71 		if (sla_in->parent_id < RL_PARENT_DEFAULT_ID ||
72 		    sla_in->parent_id >= RL_NODES_CNT_MAX) {
73 			dev_notice(&GET_DEV(accel_dev),
74 				   "Wrong parent ID\n");
75 			return -EINVAL;
76 		}
77 	}
78 
79 	return 0;
80 }
81 
82 static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id)
83 {
84 	struct rl_sla *sla;
85 
86 	if (sla_id <= RL_SLA_EMPTY_ID || sla_id >= RL_NODES_CNT_MAX) {
87 		dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n");
88 		return -EINVAL;
89 	}
90 
91 	sla = accel_dev->rate_limiting->sla[sla_id];
92 
93 	if (!sla) {
94 		dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n");
95 		return -EINVAL;
96 	}
97 
98 	if (sla->type != RL_LEAF) {
99 		dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n");
100 		return -EINVAL;
101 	}
102 
103 	return 0;
104 }
105 
106 /**
107  * find_parent() - Find the parent for a new SLA
108  * @rl_data: pointer to ratelimiting data
109  * @sla_in: pointer to user input data for a new SLA
110  *
111  * Function returns a pointer to the parent SLA. If the parent ID is provided
112  * as input in the user data, then such ID is validated and the parent SLA
113  * is returned.
114  * Otherwise, it returns the default parent SLA (root or cluster) for
115  * the new object.
116  *
117  * Return:
118  * * Pointer to the parent SLA object
119  * * NULL - when parent cannot be found
120  */
121 static struct rl_sla *find_parent(struct adf_rl *rl_data,
122 				  struct adf_rl_sla_input_data *sla_in)
123 {
124 	int input_parent_id = sla_in->parent_id;
125 	struct rl_sla *root = NULL;
126 	struct rl_sla *parent_sla;
127 	int i;
128 
129 	if (sla_in->type == RL_ROOT)
130 		return NULL;
131 
132 	if (input_parent_id > RL_PARENT_DEFAULT_ID) {
133 		parent_sla = rl_data->sla[input_parent_id];
134 		/*
135 		 * SLA can be a parent if it has the same service as the child
136 		 * and its type is higher in the hierarchy,
137 		 * for example the parent type of a LEAF must be a CLUSTER.
138 		 */
139 		if (parent_sla && parent_sla->srv == sla_in->srv &&
140 		    parent_sla->type == sla_in->type - 1)
141 			return parent_sla;
142 
143 		return NULL;
144 	}
145 
146 	/* If input_parent_id is not valid, get root for this service type. */
147 	for (i = 0; i < RL_ROOT_MAX; i++) {
148 		if (rl_data->root[i] && rl_data->root[i]->srv == sla_in->srv) {
149 			root = rl_data->root[i];
150 			break;
151 		}
152 	}
153 
154 	if (!root)
155 		return NULL;
156 
157 	/*
158 	 * If the type of this SLA is cluster, then return the root.
159 	 * Otherwise, find the default (i.e. first) cluster for this service.
160 	 */
161 	if (sla_in->type == RL_CLUSTER)
162 		return root;
163 
164 	for (i = 0; i < RL_CLUSTER_MAX; i++) {
165 		if (rl_data->cluster[i] && rl_data->cluster[i]->parent == root)
166 			return rl_data->cluster[i];
167 	}
168 
169 	return NULL;
170 }
171 
172 /**
173  * adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array
174  * @rl_data: pointer to ratelimiting data
175  * @type: SLA type
176  * @sla_arr: pointer to variable where requested pointer will be stored
177  *
178  * Return: Max number of elements allowed for the returned array
179  */
180 u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
181 			       struct rl_sla ***sla_arr)
182 {
183 	switch (type) {
184 	case RL_LEAF:
185 		*sla_arr = rl_data->leaf;
186 		return RL_LEAF_MAX;
187 	case RL_CLUSTER:
188 		*sla_arr = rl_data->cluster;
189 		return RL_CLUSTER_MAX;
190 	case RL_ROOT:
191 		*sla_arr = rl_data->root;
192 		return RL_ROOT_MAX;
193 	default:
194 		*sla_arr = NULL;
195 		return 0;
196 	}
197 }
198 
199 /**
200  * prepare_rp_ids() - Creates an array of ring pair IDs from bitmask
201  * @accel_dev: pointer to acceleration device structure
202  * @sla: SLA object data where result will be written
203  * @rp_mask: bitmask of ring pair IDs
204  *
205  * Function tries to convert the provided bitmask to an array of IDs. It checks
206  * that the RPs are not already in use, that they serve the SLA's service and
207  * that the number of provided IDs is not too big. If successful, it writes the
208  * IDs into sla->ring_pairs_ids and their count into sla->ring_pairs_cnt.
209  *
210  * Return:
211  * * 0		- ok
212  * * -EINVAL	- ring pairs array cannot be created from provided mask
213  */
214 static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
215 			  const unsigned long rp_mask)
216 {
217 	enum adf_cfg_service_type arb_srv = adf_srv_to_cfg_svc_type(sla->srv);
218 	u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf;
219 	bool *rp_in_use = accel_dev->rate_limiting->rp_in_use;
220 	size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids);
221 	u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks;
222 	u16 cnt = 0;
223 	u16 rp_id;
224 
225 	for_each_set_bit(rp_id, &rp_mask, rp_id_max) {
226 		if (cnt >= rp_cnt_max) {
227 			dev_notice(&GET_DEV(accel_dev),
228 				   "Assigned more ring pairs than supported");
229 			return -EINVAL;
230 		}
231 
232 		if (rp_in_use[rp_id]) {
233 			dev_notice(&GET_DEV(accel_dev),
234 				   "RP %u already assigned to another SLA", rp_id);
235 			return -EINVAL;
236 		}
237 
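		/*
		 * Ring pairs are laid out in bundles: the service mapping
		 * repeats every rps_per_bundle entries, so the position of
		 * rp_id within its bundle selects the arbiter service type
		 * compared against below.
		 */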
238 		if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) {
239 			dev_notice(&GET_DEV(accel_dev),
240 				   "RP %u does not support SLA service", rp_id);
241 			return -EINVAL;
242 		}
243 
244 		sla->ring_pairs_ids[cnt++] = rp_id;
245 	}
246 
247 	sla->ring_pairs_cnt = cnt;
248 
249 	return 0;
250 }
251 
252 static void mark_rps_usage(struct rl_sla *sla, bool *rp_in_use, bool used)
253 {
254 	u16 rp_id;
255 	int i;
256 
257 	for (i = 0; i < sla->ring_pairs_cnt; i++) {
258 		rp_id = sla->ring_pairs_ids[i];
259 		rp_in_use[rp_id] = used;
260 	}
261 }
262 
263 static void assign_rps_to_leaf(struct adf_accel_dev *accel_dev,
264 			       struct rl_sla *sla, bool clear)
265 {
266 	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
267 	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
268 	u32 base_offset = hw_data->rl_data.r2l_offset;
269 	u32 node_id = clear ? 0U : (sla->node_id & LEAF_MASK);
270 	u32 offset;
271 	int i;
272 
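	/*
	 * The ring-pair-to-leaf (r2l) table holds one RL_CSR_SIZE wide entry
	 * per ring pair; a value of 0 is written when clearing the assignment.
	 */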
273 	for (i = 0; i < sla->ring_pairs_cnt; i++) {
274 		offset = base_offset + (RL_CSR_SIZE * sla->ring_pairs_ids[i]);
275 		ADF_CSR_WR(pmisc_addr, offset, node_id);
276 	}
277 }
278 
279 static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev,
280 				   struct rl_sla *sla, bool clear)
281 {
282 	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
283 	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
284 	u32 base_offset = hw_data->rl_data.l2c_offset;
285 	u32 node_id = sla->node_id & LEAF_MASK;
286 	u32 parent_id = clear ? 0U : (sla->parent->node_id & CLUSTER_MASK);
287 	u32 offset;
288 
289 	offset = base_offset + (RL_CSR_SIZE * node_id);
290 	ADF_CSR_WR(pmisc_addr, offset, parent_id);
291 }
292 
293 static void assign_cluster_to_root(struct adf_accel_dev *accel_dev,
294 				   struct rl_sla *sla, bool clear)
295 {
296 	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
297 	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
298 	u32 base_offset = hw_data->rl_data.c2s_offset;
299 	u32 node_id = sla->node_id & CLUSTER_MASK;
300 	u32 parent_id = clear ? 0U : (sla->parent->node_id & ROOT_MASK);
301 	u32 offset;
302 
303 	offset = base_offset + (RL_CSR_SIZE * node_id);
304 	ADF_CSR_WR(pmisc_addr, offset, parent_id);
305 }
306 
307 static void assign_node_to_parent(struct adf_accel_dev *accel_dev,
308 				  struct rl_sla *sla, bool clear_assignment)
309 {
310 	switch (sla->type) {
311 	case RL_LEAF:
312 		assign_rps_to_leaf(accel_dev, sla, clear_assignment);
313 		assign_leaf_to_cluster(accel_dev, sla, clear_assignment);
314 		break;
315 	case RL_CLUSTER:
316 		assign_cluster_to_root(accel_dev, sla, clear_assignment);
317 		break;
318 	default:
319 		break;
320 	}
321 }
322 
323 /**
324  * can_parent_afford_sla() - Verifies if the parent allows creating an SLA
325  * @sla_in: pointer to user input data for a new SLA
326  * @sla_parent: pointer to parent SLA object
327  * @sla_cir: current child CIR value (only for update)
328  * @is_update: request is an update
329  *
330  * Algorithm verifies if the parent has enough remaining budget to take the
331  * assignment of a child with the provided parameters. In the update case, the
332  * current CIR value must be returned to the budget first.
333  * The PIR value cannot exceed the PIR assigned to the parent.
334  *
335  * Return:
336  * * true	- SLA can be created
337  * * false	- SLA cannot be created
338  */
339 static bool can_parent_afford_sla(struct adf_rl_sla_input_data *sla_in,
340 				  struct rl_sla *sla_parent, u32 sla_cir,
341 				  bool is_update)
342 {
343 	u32 rem_cir = sla_parent->rem_cir;
344 
345 	if (is_update)
346 		rem_cir += sla_cir;
347 
348 	if (sla_in->cir > rem_cir || sla_in->pir > sla_parent->pir)
349 		return false;
350 
351 	return true;
352 }
353 
354 /**
355  * can_node_afford_update() - Verifies if SLA can be updated with input data
356  * @sla_in: pointer to user input data for a new SLA
357  * @sla: pointer to SLA object selected for update
358  *
359  * Algorithm verifies if the new CIR value is big enough to satisfy the
360  * currently assigned child SLAs and if the PIR can be updated.
361  *
362  * Return:
363  * * true	- SLA can be updated
364  * * false	- SLA cannot be updated
365  */
366 static bool can_node_afford_update(struct adf_rl_sla_input_data *sla_in,
367 				   struct rl_sla *sla)
368 {
369 	u32 cir_in_use = sla->cir - sla->rem_cir;
370 
371 	/* new CIR cannot be smaller than the currently consumed value */
372 	if (cir_in_use > sla_in->cir)
373 		return false;
374 
375 	/* PIR of root/cluster cannot be reduced in node with assigned children */
376 	if (sla_in->pir < sla->pir && sla->type != RL_LEAF && cir_in_use > 0)
377 		return false;
378 
379 	return true;
380 }
381 
382 static bool is_enough_budget(struct adf_rl *rl_data, struct rl_sla *sla,
383 			     struct adf_rl_sla_input_data *sla_in,
384 			     bool is_update)
385 {
386 	u32 max_val = rl_data->device_data->scale_ref;
387 	struct rl_sla *parent = sla->parent;
388 	bool ret = true;
389 
390 	if (sla_in->cir > max_val || sla_in->pir > max_val)
391 		ret = false;
392 
393 	switch (sla->type) {
394 	case RL_LEAF:
395 		ret &= can_parent_afford_sla(sla_in, parent, sla->cir,
396 						  is_update);
397 		break;
398 	case RL_CLUSTER:
399 		ret &= can_parent_afford_sla(sla_in, parent, sla->cir,
400 						  is_update);
401 
402 		if (is_update)
403 			ret &= can_node_afford_update(sla_in, sla);
404 
405 		break;
406 	case RL_ROOT:
407 		if (is_update)
408 			ret &= can_node_afford_update(sla_in, sla);
409 
410 		break;
411 	default:
412 		ret = false;
413 		break;
414 	}
415 
416 	return ret;
417 }
418 
419 static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update)
420 {
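	/*
	 * rem_cir tracks the unassigned part of a node's CIR budget: a child's
	 * CIR is charged against its parent's rem_cir, and on update the old
	 * CIR is returned to the parent before the new value is charged.
	 */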
421 	switch (sla->type) {
422 	case RL_LEAF:
423 		if (is_update)
424 			sla->parent->rem_cir += old_cir;
425 
426 		sla->parent->rem_cir -= sla->cir;
427 		sla->rem_cir = 0;
428 		break;
429 	case RL_CLUSTER:
430 		if (is_update) {
431 			sla->parent->rem_cir += old_cir;
432 			sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);
433 		} else {
434 			sla->rem_cir = sla->cir;
435 		}
436 
437 		sla->parent->rem_cir -= sla->cir;
438 		break;
439 	case RL_ROOT:
440 		if (is_update)
441 			sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);
442 		else
443 			sla->rem_cir = sla->cir;
444 		break;
445 	default:
446 		break;
447 	}
448 }
449 
450 /**
451  * get_next_free_sla_id() - finds next free ID in the SLA array
452  * @rl_data: Pointer to ratelimiting data structure
453  *
454  * Return:
455  * * 0 : RL_NODES_CNT_MAX	- correct ID
456  * * -ENOSPC			- all SLA slots are in use
457  */
458 static int get_next_free_sla_id(struct adf_rl *rl_data)
459 {
460 	int i = 0;
461 
462 	while (i < RL_NODES_CNT_MAX && rl_data->sla[i])
463 		i++;
464 
465 	if (i == RL_NODES_CNT_MAX)
466 		return -ENOSPC;
467 
468 	return i;
469 }
470 
471 /**
472  * get_next_free_node_id() - finds next free ID in the array of that node type
473  * @rl_data: Pointer to ratelimiting data structure
474  * @sla: Pointer to SLA object for which the ID is searched
475  *
476  * Return:
477  * * 0 : RL_[NODE_TYPE]_MAX	- correct ID
478  * * -ENOSPC			- all slots of that type are in use
479  */
480 static int get_next_free_node_id(struct adf_rl *rl_data, struct rl_sla *sla)
481 {
482 	struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev);
483 	int max_id, i, step, rp_per_leaf;
484 	struct rl_sla **sla_list;
485 
486 	rp_per_leaf = hw_device->num_banks / hw_device->num_banks_per_vf;
487 
488 	/*
489 	 * Static nodes mapping:
490 	 * root0 - cluster[0,4,8,12] - leaf[0-15]
491 	 * root1 - cluster[1,5,9,13] - leaf[16-31]
492 	 * root2 - cluster[2,6,10,14] - leaf[32-47]
493 	 */
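	/*
	 * For example, with the mapping above a new CLUSTER for service 2 is
	 * searched at indices 2, 6, 10, 14, and a new LEAF for that service
	 * within [2 * rp_per_leaf, 3 * rp_per_leaf).
	 */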
494 	switch (sla->type) {
495 	case RL_LEAF:
496 		i = sla->srv * rp_per_leaf;
497 		step = 1;
498 		max_id = i + rp_per_leaf;
499 		sla_list = rl_data->leaf;
500 		break;
501 	case RL_CLUSTER:
502 		i = sla->srv;
503 		step = 4;
504 		max_id = RL_CLUSTER_MAX;
505 		sla_list = rl_data->cluster;
506 		break;
507 	case RL_ROOT:
508 		return sla->srv;
509 	default:
510 		return -EINVAL;
511 	}
512 
513 	while (i < max_id && sla_list[i])
514 		i += step;
515 
516 	if (i >= max_id)
517 		return -ENOSPC;
518 
519 	return i;
520 }
521 
522 u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
523 				  enum adf_base_services svc_type)
524 {
525 	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
526 	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
527 	u64 avail_slice_cycles, allocated_tokens;
528 
529 	if (!sla_val)
530 		return 0;
531 
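	/*
	 * allocated_tokens = (clock_frequency * slice_cnt / scan_interval)
	 *		       * sla_val / scale_ref
	 * i.e. the share of slice cycles per scan interval granted to this SLA.
	 */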
532 	/* Handle generation specific slice count adjustment */
533 	avail_slice_cycles = hw_data->clock_frequency;
534 	avail_slice_cycles *= hw_data->get_svc_slice_cnt(accel_dev, svc_type);
535 
536 	do_div(avail_slice_cycles, device_data->scan_interval);
537 	allocated_tokens = avail_slice_cycles * sla_val;
538 	do_div(allocated_tokens, device_data->scale_ref);
539 
540 	return allocated_tokens;
541 }
542 
543 static u32 adf_rl_get_num_svc_aes(struct adf_accel_dev *accel_dev,
544 				  enum adf_base_services svc)
545 {
546 	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
547 
548 	if (svc >= SVC_BASE_COUNT)
549 		return 0;
550 
551 	return device_data->svc_ae_mask[svc];
552 }
553 
554 u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
555 			       enum adf_base_services svc_type)
556 {
557 	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
558 	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
559 	u64 allocated_ae_cycles, avail_ae_cycles;
560 
561 	if (!sla_val)
562 		return 0;
563 
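	/*
	 * avail_ae_cycles is the number of AE cycles available to this service
	 * per scan interval; the SLA value is converted to an absolute
	 * throughput (sla_val * max_tp / scale_ref) and then turned back into
	 * a cycle budget relative to max_tp.
	 */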
564 	avail_ae_cycles = hw_data->clock_frequency;
565 	avail_ae_cycles *= adf_rl_get_num_svc_aes(accel_dev, svc_type);
566 	do_div(avail_ae_cycles, device_data->scan_interval);
567 
568 	sla_val *= device_data->max_tp[svc_type];
569 	sla_val /= device_data->scale_ref;
570 
571 	allocated_ae_cycles = (sla_val * avail_ae_cycles);
572 	do_div(allocated_ae_cycles, device_data->max_tp[svc_type]);
573 
574 	return allocated_ae_cycles;
575 }
576 
577 u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
578 			    enum adf_base_services svc_type, bool is_bw_out)
579 {
580 	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
581 	u64 sla_to_bytes, allocated_bw, sla_scaled;
582 
583 	if (!sla_val)
584 		return 0;
585 
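	/*
	 * Convert the SLA value to bytes moved per scan interval: scale it by
	 * the service's maximum throughput, apply the PCIe calibration factors
	 * (pcie_scale_mul / pcie_scale_div) and express the result in
	 * RL_TOKEN_PCIE_SIZE byte tokens.
	 */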
586 	sla_to_bytes = sla_val;
587 	sla_to_bytes *= device_data->max_tp[svc_type];
588 	do_div(sla_to_bytes, device_data->scale_ref);
589 
590 	sla_to_bytes *= (svc_type == SVC_ASYM) ? RL_TOKEN_ASYM_SIZE : BYTES_PER_MBIT;
591 	if (svc_type == SVC_DC && is_bw_out)
592 		sla_to_bytes *= device_data->slices.dcpr_cnt -
593 				device_data->dcpr_correction;
594 
595 	sla_scaled = sla_to_bytes * device_data->pcie_scale_mul;
596 	do_div(sla_scaled, device_data->pcie_scale_div);
597 	allocated_bw = sla_scaled;
598 	do_div(allocated_bw, RL_TOKEN_PCIE_SIZE);
599 	do_div(allocated_bw, device_data->scan_interval);
600 
601 	return allocated_bw;
602 }
603 
604 /**
605  * add_new_sla_entry() - creates a new SLA object and fills it with user data
606  * @accel_dev: pointer to acceleration device structure
607  * @sla_in: pointer to user input data for a new SLA
608  * @sla_out: Pointer to variable that will contain the address of a new
609  *	     SLA object if the operation succeeds
610  *
611  * Return:
612  * * 0		- ok
613  * * -ENOMEM	- memory allocation failed
614  * * -EINVAL	- invalid user input
615  * * -ENOSPC	- all available SLAs are in use
616  */
617 static int add_new_sla_entry(struct adf_accel_dev *accel_dev,
618 			     struct adf_rl_sla_input_data *sla_in,
619 			     struct rl_sla **sla_out)
620 {
621 	struct adf_rl *rl_data = accel_dev->rate_limiting;
622 	struct rl_sla *sla;
623 	int ret = 0;
624 
625 	sla = kzalloc(sizeof(*sla), GFP_KERNEL);
626 	if (!sla) {
627 		ret = -ENOMEM;
628 		goto ret_err;
629 	}
630 	*sla_out = sla;
631 
632 	if (!adf_is_service_enabled(accel_dev, sla_in->srv)) {
633 		dev_notice(&GET_DEV(accel_dev),
634 			   "Provided service is not enabled\n");
635 		ret = -EINVAL;
636 		goto ret_err;
637 	}
638 
639 	sla->srv = sla_in->srv;
640 	sla->type = sla_in->type;
641 	ret = get_next_free_node_id(rl_data, sla);
642 	if (ret < 0) {
643 		dev_notice(&GET_DEV(accel_dev),
644 			   "Exceeded number of available nodes for that service\n");
645 		goto ret_err;
646 	}
647 	sla->node_id = ret;
648 
649 	ret = get_next_free_sla_id(rl_data);
650 	if (ret < 0) {
651 		dev_notice(&GET_DEV(accel_dev),
652 				   "Maximum number of SLAs already allocated\n");
653 		goto ret_err;
654 	}
655 	sla->sla_id = ret;
656 
657 	sla->parent = find_parent(rl_data, sla_in);
658 	if (!sla->parent && sla->type != RL_ROOT) {
659 		if (sla_in->parent_id != RL_PARENT_DEFAULT_ID)
660 			dev_notice(&GET_DEV(accel_dev),
661 				   "Provided parent ID does not exist or cannot be parent for this SLA.");
662 		else
663 			dev_notice(&GET_DEV(accel_dev),
664 				   "Unable to find parent node for this service. Is service enabled?");
665 		ret = -EINVAL;
666 		goto ret_err;
667 	}
668 
669 	if (sla->type == RL_LEAF) {
670 		ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask);
671 		if (!sla->ring_pairs_cnt || ret) {
672 			dev_notice(&GET_DEV(accel_dev),
673 				   "Unable to find ring pairs to assign to the leaf");
674 			if (!ret)
675 				ret = -EINVAL;
676 
677 			goto ret_err;
678 		}
679 	}
680 
681 	return 0;
682 
683 ret_err:
684 	kfree(sla);
685 	*sla_out = NULL;
686 
687 	return ret;
688 }
689 
690 static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
691 {
692 	struct adf_rl *rl_data = accel_dev->rate_limiting;
693 	struct adf_rl_hw_data *device_data = rl_data->device_data;
694 	struct adf_rl_sla_input_data sla_in = { };
695 	int ret = 0;
696 	int i;
697 
698 	/* Init root for each enabled service */
699 	sla_in.type = RL_ROOT;
700 	sla_in.parent_id = RL_PARENT_DEFAULT_ID;
701 
702 	for (i = 0; i < SVC_BASE_COUNT; i++) {
703 		if (!adf_is_service_enabled(accel_dev, i))
704 			continue;
705 
706 		sla_in.cir = device_data->scale_ref;
707 		sla_in.pir = sla_in.cir;
708 		sla_in.srv = i;
709 
710 		ret = adf_rl_add_sla(accel_dev, &sla_in);
711 		if (ret)
712 			return ret;
713 	}
714 
715 	/* Init default cluster for each root */
716 	sla_in.type = RL_CLUSTER;
717 	for (i = 0; i < SVC_BASE_COUNT; i++) {
718 		if (!rl_data->root[i])
719 			continue;
720 		sla_in.cir = rl_data->root[i]->cir;
721 		sla_in.pir = sla_in.cir;
722 		sla_in.srv = rl_data->root[i]->srv;
723 
724 		ret = adf_rl_add_sla(accel_dev, &sla_in);
725 		if (ret)
726 			return ret;
727 	}
728 
729 	return 0;
730 }
731 
732 static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
733 {
734 	bool *rp_in_use = rl_data->rp_in_use;
735 	struct rl_sla **sla_type_arr = NULL;
736 	int i, sla_id, node_id;
737 	u32 old_cir;
738 
739 	sla_id = sla->sla_id;
740 	node_id = sla->node_id;
741 	old_cir = sla->cir;
742 	sla->cir = 0;
743 	sla->pir = 0;
744 
745 	for (i = 0; i < sla->ring_pairs_cnt; i++)
746 		rp_in_use[sla->ring_pairs_ids[i]] = false;
747 
748 	update_budget(sla, old_cir, true);
749 	adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
750 	assign_node_to_parent(rl_data->accel_dev, sla, true);
751 	adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type);
752 	mark_rps_usage(sla, rl_data->rp_in_use, false);
753 
754 	kfree(sla);
755 	rl_data->sla[sla_id] = NULL;
756 	sla_type_arr[node_id] = NULL;
757 }
758 
759 static void free_all_sla(struct adf_accel_dev *accel_dev)
760 {
761 	struct adf_rl *rl_data = accel_dev->rate_limiting;
762 	int sla_id;
763 
764 	mutex_lock(&rl_data->rl_lock);
765 
766 	for (sla_id = 0; sla_id < RL_NODES_CNT_MAX; sla_id++) {
767 		if (!rl_data->sla[sla_id])
768 			continue;
769 
770 		kfree(rl_data->sla[sla_id]);
771 		rl_data->sla[sla_id] = NULL;
772 	}
773 
774 	mutex_unlock(&rl_data->rl_lock);
775 }
776 
777 /**
778  * add_update_sla() - handles the creation and the update of an SLA
779  * @accel_dev: pointer to acceleration device structure
780  * @sla_in: pointer to user input data for a new/updated SLA
781  * @is_update: flag to indicate if this is an update or an add operation
782  *
783  * Return:
784  * * 0		- ok
785  * * -ENOMEM	- memory allocation failed
786  * * -EINVAL	- user input data cannot be used to create SLA
787  * * -ENOSPC	- all available SLAs are in use
788  */
789 static int add_update_sla(struct adf_accel_dev *accel_dev,
790 			  struct adf_rl_sla_input_data *sla_in, bool is_update)
791 {
792 	struct adf_rl *rl_data = accel_dev->rate_limiting;
793 	struct rl_sla **sla_type_arr = NULL;
794 	struct rl_sla *sla = NULL;
795 	u32 old_cir = 0;
796 	int ret;
797 
798 	if (!sla_in) {
799 		dev_warn(&GET_DEV(accel_dev),
800 			 "SLA input data pointer is missing\n");
801 		return -EFAULT;
802 	}
803 
804 	mutex_lock(&rl_data->rl_lock);
805 
806 	/* Input validation */
807 	ret = validate_user_input(accel_dev, sla_in, is_update);
808 	if (ret)
809 		goto ret_err;
810 
811 	if (is_update) {
812 		ret = validate_sla_id(accel_dev, sla_in->sla_id);
813 		if (ret)
814 			goto ret_err;
815 
816 		sla = rl_data->sla[sla_in->sla_id];
817 		old_cir = sla->cir;
818 	} else {
819 		ret = add_new_sla_entry(accel_dev, sla_in, &sla);
820 		if (ret)
821 			goto ret_err;
822 	}
823 
824 	if (!is_enough_budget(rl_data, sla, sla_in, is_update)) {
825 		dev_notice(&GET_DEV(accel_dev),
826 			   "Input value exceeds the remaining budget%s\n",
827 			   is_update ? " or more budget is already in use" : "");
828 		ret = -EINVAL;
829 		goto ret_err;
830 	}
831 	sla->cir = sla_in->cir;
832 	sla->pir = sla_in->pir;
833 
834 	/* Apply SLA */
835 	assign_node_to_parent(accel_dev, sla, false);
836 	ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update);
837 	if (ret) {
838 		dev_notice(&GET_DEV(accel_dev),
839 			   "Failed to apply an SLA\n");
840 		goto ret_err;
841 	}
842 	update_budget(sla, old_cir, is_update);
843 
844 	if (!is_update) {
845 		mark_rps_usage(sla, rl_data->rp_in_use, true);
846 		adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
847 		sla_type_arr[sla->node_id] = sla;
848 		rl_data->sla[sla->sla_id] = sla;
849 	}
850 
851 	sla_in->sla_id = sla->sla_id;
852 	goto ret_ok;
853 
854 ret_err:
855 	if (!is_update) {
856 		sla_in->sla_id = -1;
857 		kfree(sla);
858 	}
859 ret_ok:
860 	mutex_unlock(&rl_data->rl_lock);
861 	return ret;
862 }
863 
864 /**
865  * adf_rl_add_sla() - handles the creation of an SLA
866  * @accel_dev: pointer to acceleration device structure
867  * @sla_in: pointer to user input data required to add an SLA
868  *
869  * Return:
870  * * 0		- ok
871  * * -ENOMEM	- memory allocation failed
872  * * -EINVAL	- invalid user input
873  * * -ENOSPC	- all available SLAs are in use
874  */
875 int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
876 		   struct adf_rl_sla_input_data *sla_in)
877 {
878 	return add_update_sla(accel_dev, sla_in, false);
879 }
880 
881 /**
882  * adf_rl_update_sla() - handles the update of an SLA
883  * @accel_dev: pointer to acceleration device structure
884  * @sla_in: pointer to user input data required to update an SLA
885  *
886  * Return:
887  * * 0		- ok
888  * * -EINVAL	- user input data cannot be used to update SLA
889  */
890 int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
891 		      struct adf_rl_sla_input_data *sla_in)
892 {
893 	return add_update_sla(accel_dev, sla_in, true);
894 }
895 
896 /**
897  * adf_rl_get_sla() - returns data of an existing SLA
898  * @accel_dev: pointer to acceleration device structure
899  * @sla_in: pointer to user data where SLA info will be stored
900  *
901  * The sla_id for which data are requested should be set in the sla_in structure.
902  *
903  * Return:
904  * * 0		- ok
905  * * -EINVAL	- provided sla_id does not exist
906  */
907 int adf_rl_get_sla(struct adf_accel_dev *accel_dev,
908 		   struct adf_rl_sla_input_data *sla_in)
909 {
910 	struct rl_sla *sla;
911 	int ret, i;
912 
913 	ret = validate_sla_id(accel_dev, sla_in->sla_id);
914 	if (ret)
915 		return ret;
916 
917 	sla = accel_dev->rate_limiting->sla[sla_in->sla_id];
918 	sla_in->type = sla->type;
919 	sla_in->srv = sla->srv;
920 	sla_in->cir = sla->cir;
921 	sla_in->pir = sla->pir;
922 	sla_in->rp_mask = 0U;
923 	if (sla->parent)
924 		sla_in->parent_id = sla->parent->sla_id;
925 	else
926 		sla_in->parent_id = RL_PARENT_DEFAULT_ID;
927 
928 	for (i = 0; i < sla->ring_pairs_cnt; i++)
929 		sla_in->rp_mask |= BIT(sla->ring_pairs_ids[i]);
930 
931 	return 0;
932 }
933 
934 /**
935  * adf_rl_get_capability_remaining() - returns the remaining SLA value (CIR) for
936  *				       selected service or provided sla_id
937  * @accel_dev: pointer to acceleration device structure
938  * @srv: service ID for which capability is requested
939  * @sla_id: ID of the cluster or root to which we want to assign a new SLA
940  *
941  * Check if the provided SLA id is valid. If it is and the service matches
942  * the requested service and the type is cluster or root, return the remaining
943  * capability.
944  * If the provided ID does not match the service or type, return the remaining
945  * capacity of the default cluster for that service.
946  *
947  * Return:
948  * * Positive value	- correct remaining value
949  * * -EINVAL		- algorithm cannot find a remaining value for provided data
950  */
951 int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
952 				    enum adf_base_services srv, int sla_id)
953 {
954 	struct adf_rl *rl_data = accel_dev->rate_limiting;
955 	struct rl_sla *sla = NULL;
956 	int i;
957 
958 	if (srv >= SVC_BASE_COUNT)
959 		return -EINVAL;
960 
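	/*
	 * An explicitly provided sla_id is used only when it refers to a root
	 * or cluster node of the requested service; otherwise fall back to the
	 * default cluster of that service.
	 */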
961 	if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) {
962 		sla = rl_data->sla[sla_id];
963 
964 		if (sla->srv == srv && sla->type <= RL_CLUSTER)
965 			goto ret_ok;
966 	}
967 
968 	for (i = 0; i < RL_CLUSTER_MAX; i++) {
969 		if (!rl_data->cluster[i])
970 			continue;
971 
972 		if (rl_data->cluster[i]->srv == srv) {
973 			sla = rl_data->cluster[i];
974 			goto ret_ok;
975 		}
976 	}
977 
978 	return -EINVAL;
979 ret_ok:
980 	return sla->rem_cir;
981 }
982 
983 /**
984  * adf_rl_remove_sla() - removes the SLA with the provided sla_id
985  * @accel_dev: pointer to acceleration device structure
986  * @sla_id: ID of the SLA to be removed
987  *
988  * Return:
989  * * 0		- ok
990  * * -EINVAL	- wrong sla_id or the SLA still has assigned children
991  */
992 int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id)
993 {
994 	struct adf_rl *rl_data = accel_dev->rate_limiting;
995 	struct rl_sla *sla;
996 	int ret = 0;
997 
998 	mutex_lock(&rl_data->rl_lock);
999 	ret = validate_sla_id(accel_dev, sla_id);
1000 	if (ret)
1001 		goto err_ret;
1002 
1003 	sla = rl_data->sla[sla_id];
1004 
1005 	if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) {
1006 		dev_notice(&GET_DEV(accel_dev),
1007 			   "To remove a parent SLA, all its children must be removed first");
1008 		ret = -EINVAL;
1009 		goto err_ret;
1010 	}
1011 
1012 	clear_sla(rl_data, sla);
1013 
1014 err_ret:
1015 	mutex_unlock(&rl_data->rl_lock);
1016 	return ret;
1017 }
1018 
1019 /**
1020  * adf_rl_remove_sla_all() - removes all SLAs from device
1021  * @accel_dev: pointer to acceleration device structure
1022  * @incl_default: set to true if default SLAs also should be removed
1023  */
1024 void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default)
1025 {
1026 	struct adf_rl *rl_data = accel_dev->rate_limiting;
1027 	int end_type = incl_default ? RL_ROOT : RL_LEAF;
1028 	struct rl_sla **sla_type_arr = NULL;
1029 	u32 max_id;
1030 	int i, j;
1031 
1032 	mutex_lock(&rl_data->rl_lock);
1033 
1034 	/* Unregister and remove all SLAs */
1035 	for (j = RL_LEAF; j >= end_type; j--) {
1036 		max_id = adf_rl_get_sla_arr_of_type(rl_data, j, &sla_type_arr);
1037 
1038 		for (i = 0; i < max_id; i++) {
1039 			if (!sla_type_arr[i])
1040 				continue;
1041 
1042 			clear_sla(rl_data, sla_type_arr[i]);
1043 		}
1044 	}
1045 
1046 	mutex_unlock(&rl_data->rl_lock);
1047 }
1048 
1049 int adf_rl_init(struct adf_accel_dev *accel_dev)
1050 {
1051 	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
1052 	struct adf_rl_hw_data *rl_hw_data = &hw_data->rl_data;
1053 	struct adf_rl *rl;
1054 	int ret = 0;
1055 
1056 	/* Validate device parameters */
1057 	if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_ASYM]) ||
1058 	    RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_SYM]) ||
1059 	    RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_DC]) ||
1060 	    RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) ||
1061 	    RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) ||
1062 	    RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) ||
1063 	    RL_VALIDATE_NON_ZERO(rl_hw_data->scale_ref)) {
1064 		ret = -EOPNOTSUPP;
1065 		goto err_ret;
1066 	}
1067 
1068 	rl = kzalloc(sizeof(*rl), GFP_KERNEL);
1069 	if (!rl) {
1070 		ret = -ENOMEM;
1071 		goto err_ret;
1072 	}
1073 
1074 	mutex_init(&rl->rl_lock);
1075 	rl->device_data = &accel_dev->hw_device->rl_data;
1076 	rl->accel_dev = accel_dev;
1077 	init_rwsem(&rl->user_input.lock);
1078 	accel_dev->rate_limiting = rl;
1079 
1080 err_ret:
1081 	return ret;
1082 }
1083 
1084 int adf_rl_start(struct adf_accel_dev *accel_dev)
1085 {
1086 	struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data;
1087 	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
1088 	u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;
1089 	int ret;
1090 
1091 	if (!accel_dev->rate_limiting) {
1092 		ret = -EOPNOTSUPP;
1093 		goto ret_err;
1094 	}
1095 
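	/*
	 * Rate limiting requires all firmware capability bits covered by
	 * RL_CAPABILITY_MASK (bits 4-6) to be set.
	 */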
1096 	if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) {
1097 		dev_info(&GET_DEV(accel_dev), "feature not supported by FW\n");
1098 		ret = -EOPNOTSUPP;
1099 		goto ret_free;
1100 	}
1101 
1102 	ADF_CSR_WR(pmisc_addr, rl_hw_data->pciin_tb_offset,
1103 		   RL_TOKEN_GRANULARITY_PCIEIN_BUCKET);
1104 	ADF_CSR_WR(pmisc_addr, rl_hw_data->pciout_tb_offset,
1105 		   RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET);
1106 
1107 	ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices);
1108 	if (ret) {
1109 		dev_err(&GET_DEV(accel_dev), "initialization failed\n");
1110 		goto ret_free;
1111 	}
1112 
1113 	ret = initialize_default_nodes(accel_dev);
1114 	if (ret) {
1115 		dev_err(&GET_DEV(accel_dev),
1116 			"failed to initialize default SLAs\n");
1117 		goto ret_sla_rm;
1118 	}
1119 
1120 	ret = adf_sysfs_rl_add(accel_dev);
1121 	if (ret) {
1122 		dev_err(&GET_DEV(accel_dev), "failed to add sysfs interface\n");
1123 		goto ret_sysfs_rm;
1124 	}
1125 
1126 	return 0;
1127 
1128 ret_sysfs_rm:
1129 	adf_sysfs_rl_rm(accel_dev);
1130 ret_sla_rm:
1131 	adf_rl_remove_sla_all(accel_dev, true);
1132 ret_free:
1133 	kfree(accel_dev->rate_limiting);
1134 	accel_dev->rate_limiting = NULL;
1135 ret_err:
1136 	return ret;
1137 }
1138 
1139 void adf_rl_stop(struct adf_accel_dev *accel_dev)
1140 {
1141 	if (!accel_dev->rate_limiting)
1142 		return;
1143 
1144 	adf_sysfs_rl_rm(accel_dev);
1145 	free_all_sla(accel_dev);
1146 }
1147 
1148 void adf_rl_exit(struct adf_accel_dev *accel_dev)
1149 {
1150 	if (!accel_dev->rate_limiting)
1151 		return;
1152 
1153 	kfree(accel_dev->rate_limiting);
1154 	accel_dev->rate_limiting = NULL;
1155 }
1156