// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/string_choices.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"

#define UNSET_RING_NUM -1

static const char * const state_operations[] = {
	[DEV_DOWN] = "down",
	[DEV_UP] = "up",
};

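/* Report the device state via sysfs: "up" if the device is started, "down" otherwise. */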
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct adf_accel_dev *accel_dev;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", str_up_down(adf_dev_started(accel_dev)));
}

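/*
 * Bring the device up or down. Accepts "up" or "down"; returns -EBUSY if
 * the device is in reset or currently in use.
 */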
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct adf_accel_dev *accel_dev;
	u32 accel_id;
	int ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	accel_id = accel_dev->accel_id;

	if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
		dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
		return -EBUSY;
	}

	ret = sysfs_match_string(state_operations, buf);
	if (ret < 0)
		return ret;

	switch (ret) {
	case DEV_DOWN:
		dev_info(dev, "Stopping device qat_dev%d\n", accel_id);

		if (!adf_dev_started(accel_dev)) {
			dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
				 accel_id);

			break;
		}

		ret = adf_dev_down(accel_dev);
		if (ret)
			return ret;

		break;
	case DEV_UP:
		dev_info(dev, "Starting device qat_dev%d\n", accel_id);

		ret = adf_dev_up(accel_dev, true);
		if (ret == -EALREADY) {
			break;
		} else if (ret) {
			dev_err(dev, "Failed to start device qat_dev%d\n",
				accel_id);
			adf_dev_down(accel_dev);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

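/* Report the services (for example "sym;asym") currently enabled on the device. */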
static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	struct adf_accel_dev *accel_dev;
	int ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%s\n", services);
}

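/* Store the new services string under ADF_SERVICES_ENABLED in the general section. */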
static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
				       const char *services)
{
	return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					   ADF_SERVICES_ENABLED, services,
					   ADF_STR);
}

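/*
 * Reconfigure the services enabled on the device. The device must be down;
 * the capabilities mask is recomputed as it is derived from the configuration.
 */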
static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
	struct adf_hw_device_data *hw_data;
	struct adf_accel_dev *accel_dev;
	int ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ret = adf_parse_service_string(accel_dev, buf, count, services,
				       ADF_CFG_MAX_VAL_LEN_IN_BYTES);
	if (ret)
		return ret;

	if (adf_dev_started(accel_dev)) {
		dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
			 accel_dev->accel_id);
		return -EINVAL;
	}

	ret = adf_sysfs_update_dev_config(accel_dev, services);
	if (ret < 0)
		return ret;

	hw_data = GET_HW_DATA(accel_dev);

	/* Update the capabilities mask after a change in configuration.
	 * A call to this function is required as capabilities are, at the
	 * moment, tied to the configuration.
	 */
	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
	if (!hw_data->accel_capabilities_mask)
		return -EINVAL;

	return count;
}

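/*
 * Report whether idle power management is enabled. If the key is absent
 * from the device configuration, idle support is reported as enabled ("1").
 */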
static ssize_t pm_idle_enabled_show(struct device *dev, struct device_attribute *attr,
				    char *buf)
{
	char pm_idle_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
	struct adf_accel_dev *accel_dev;
	int ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_PM_IDLE_SUPPORT, pm_idle_enabled);
	if (ret)
		return sysfs_emit(buf, "1\n");

	return sysfs_emit(buf, "%s\n", pm_idle_enabled);
}

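/* Enable or disable idle power management. The device must be down. */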
static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long pm_idle_enabled_cfg_val;
	struct adf_accel_dev *accel_dev;
	bool pm_idle_enabled;
	int ret;

	ret = kstrtobool(buf, &pm_idle_enabled);
	if (ret)
		return ret;

	pm_idle_enabled_cfg_val = pm_idle_enabled;
	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	if (adf_dev_started(accel_dev)) {
		dev_info(dev, "Device qat_dev%d must be down to set pm_idle_enabled.\n",
			 accel_dev->accel_id);
		return -EINVAL;
	}

	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					  ADF_PM_IDLE_SUPPORT, &pm_idle_enabled_cfg_val,
					  ADF_DEC);
	if (ret)
		return ret;

	return count;
}
static DEVICE_ATTR_RW(pm_idle_enabled);

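/* Report whether automatic reset of the device on error is "on" or "off". */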
static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr,
			       char *buf)
{
	struct adf_accel_dev *accel_dev;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", str_on_off(accel_dev->autoreset_on_error));
}

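/* Enable or disable automatic reset of the device on error. */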
static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct adf_accel_dev *accel_dev;
	bool enabled = false;
	int ret;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	accel_dev->autoreset_on_error = enabled;

	return count;
}
static DEVICE_ATTR_RW(auto_reset);

static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);

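/*
 * Report the service (dc, sym or asym) assigned to the ring pair previously
 * selected through a write to rp2srv.
 */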
static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct adf_hw_device_data *hw_data;
	struct adf_accel_dev *accel_dev;
	enum adf_cfg_service_type svc;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	hw_data = GET_HW_DATA(accel_dev);

	if (accel_dev->sysfs.ring_num == UNSET_RING_NUM)
		return -EINVAL;

	down_read(&accel_dev->sysfs.lock);
	svc = GET_SRV_TYPE(accel_dev, accel_dev->sysfs.ring_num %
				      hw_data->num_banks_per_vf);
	up_read(&accel_dev->sysfs.lock);

	switch (svc) {
	case COMP:
		return sysfs_emit(buf, "%s\n", ADF_CFG_DC);
	case SYM:
		return sysfs_emit(buf, "%s\n", ADF_CFG_SYM);
	case ASYM:
		return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM);
	default:
		break;
	}
	return -EINVAL;
}

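/* Select the ring pair to be queried by a subsequent read of rp2srv. */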
static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct adf_accel_dev *accel_dev;
	int num_rings, ret;
	unsigned int ring;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &ring);
	if (ret)
		return ret;

	num_rings = GET_MAX_BANKS(accel_dev);
	if (ring >= num_rings) {
		dev_err(&GET_DEV(accel_dev),
			"Device does not support more than %u ring pairs\n",
			num_rings);
		return -EINVAL;
	}

	down_write(&accel_dev->sysfs.lock);
	accel_dev->sysfs.ring_num = ring;
	up_write(&accel_dev->sysfs.lock);

	return count;
}
static DEVICE_ATTR_RW(rp2srv);

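/* Report the number of ring pairs supported by the device. */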
static ssize_t num_rps_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct adf_accel_dev *accel_dev;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", GET_MAX_BANKS(accel_dev));
}
static DEVICE_ATTR_RO(num_rps);

static struct attribute *qat_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_cfg_services.attr,
	&dev_attr_pm_idle_enabled.attr,
	&dev_attr_rp2srv.attr,
	&dev_attr_num_rps.attr,
	&dev_attr_auto_reset.attr,
	NULL,
};

static struct attribute_group qat_group = {
	.attrs = qat_attrs,
	.name = "qat",
};

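/**
 * adf_sysfs_init() - Create the qat sysfs attribute group for a device
 * @accel_dev: Pointer to the acceleration device.
 *
 * Creates the "qat" attribute group under the device node and marks the
 * ring pair selector used by rp2srv as unset.
 *
 * Return: 0 on success, error code otherwise.
 */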
int adf_sysfs_init(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to create qat attribute group: %d\n", ret);
	}

	accel_dev->sysfs.ring_num = UNSET_RING_NUM;

	return ret;
}
EXPORT_SYMBOL_GPL(adf_sysfs_init);