// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"

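/* Sentinel: no ring pair has been selected through the rp2srv attribute yet */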
#define UNSET_RING_NUM -1

static const char * const state_operations[] = {
	[DEV_DOWN] = "down",
	[DEV_UP] = "up",
};

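/* Report the state of the device: "up" if started, "down" otherwise */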
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct adf_accel_dev *accel_dev;
	char *state;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	state = adf_dev_started(accel_dev) ? "up" : "down";
	return sysfs_emit(buf, "%s\n", state);
}

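/*
 * Bring the device up or down. Writes are rejected with -EBUSY while the
 * device is in reset or in use.
 */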
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct adf_accel_dev *accel_dev;
	u32 accel_id;
	int ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	accel_id = accel_dev->accel_id;

	if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
		dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
		return -EBUSY;
	}

	ret = sysfs_match_string(state_operations, buf);
	if (ret < 0)
		return ret;

	switch (ret) {
	case DEV_DOWN:
		dev_info(dev, "Stopping device qat_dev%d\n", accel_id);

		if (!adf_dev_started(accel_dev)) {
			dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
				 accel_id);

			break;
		}

		ret = adf_dev_down(accel_dev, true);
		if (ret)
			return ret;

		break;
	case DEV_UP:
		dev_info(dev, "Starting device qat_dev%d\n", accel_id);

		ret = adf_dev_up(accel_dev, true);
		if (ret == -EALREADY) {
			break;
		} else if (ret) {
			dev_err(dev, "Failed to start device qat_dev%d\n",
				accel_id);
			adf_dev_down(accel_dev, true);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

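/* Report the services enabled in the device configuration */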
static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	struct adf_accel_dev *accel_dev;
	int ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%s\n", services);
}

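/* Store the requested services string in the device configuration table */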
static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
				       const char *services)
{
	return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					   ADF_SERVICES_ENABLED, services,
					   ADF_STR);
}

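/*
 * Reconfigure the services enabled on the device. The device must be down;
 * the capability mask is refreshed to match the new configuration.
 */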
static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct adf_hw_device_data *hw_data;
	struct adf_accel_dev *accel_dev;
	int ret;

	ret = sysfs_match_string(adf_cfg_services, buf);
	if (ret < 0)
		return ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	if (adf_dev_started(accel_dev)) {
		dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
			 accel_dev->accel_id);
		return -EINVAL;
	}

	ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]);
	if (ret < 0)
		return ret;

	hw_data = GET_HW_DATA(accel_dev);

	/* Update capabilities mask after change in configuration.
	 * A call to this function is required as capabilities are, at the
	 * moment, tied to configuration
	 */
	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
	if (!hw_data->accel_capabilities_mask)
		return -EINVAL;

	return count;
}

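/*
 * Report whether idle power management support is enabled. Defaults to 1
 * when the key is not present in the device configuration.
 */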
static ssize_t pm_idle_enabled_show(struct device *dev, struct device_attribute *attr,
				    char *buf)
{
	char pm_idle_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
	struct adf_accel_dev *accel_dev;
	int ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_PM_IDLE_SUPPORT, pm_idle_enabled);
	if (ret)
		return sysfs_emit(buf, "1\n");

	return sysfs_emit(buf, "%s\n", pm_idle_enabled);
}

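/*
 * Enable or disable idle power management support. The device must be down
 * for the new value to be accepted.
 */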
static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long pm_idle_enabled_cfg_val;
	struct adf_accel_dev *accel_dev;
	bool pm_idle_enabled;
	int ret;

	ret = kstrtobool(buf, &pm_idle_enabled);
	if (ret)
		return ret;

	pm_idle_enabled_cfg_val = pm_idle_enabled;
	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	if (adf_dev_started(accel_dev)) {
		dev_info(dev, "Device qat_dev%d must be down to set pm_idle_enabled.\n",
			 accel_dev->accel_id);
		return -EINVAL;
	}

	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					  ADF_PM_IDLE_SUPPORT, &pm_idle_enabled_cfg_val,
					  ADF_DEC);
	if (ret)
		return ret;

	return count;
}
static DEVICE_ATTR_RW(pm_idle_enabled);

static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);

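/*
 * Report the service (dc, sym or asym) assigned to the ring pair previously
 * selected through a write to this attribute.
 */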
static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct adf_hw_device_data *hw_data;
	struct adf_accel_dev *accel_dev;
	enum adf_cfg_service_type svc;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	hw_data = GET_HW_DATA(accel_dev);

	if (accel_dev->sysfs.ring_num == UNSET_RING_NUM)
		return -EINVAL;

	down_read(&accel_dev->sysfs.lock);
	svc = GET_SRV_TYPE(accel_dev, accel_dev->sysfs.ring_num %
				      hw_data->num_banks_per_vf);
	up_read(&accel_dev->sysfs.lock);

	switch (svc) {
	case COMP:
		return sysfs_emit(buf, "%s\n", ADF_CFG_DC);
	case SYM:
		return sysfs_emit(buf, "%s\n", ADF_CFG_SYM);
	case ASYM:
		return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM);
	default:
		break;
	}
	return -EINVAL;
}

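/*
 * Select the ring pair whose service mapping will be reported by a
 * subsequent read of this attribute.
 */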
static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct adf_accel_dev *accel_dev;
	int num_rings, ret;
	unsigned int ring;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &ring);
	if (ret)
		return ret;

	num_rings = GET_MAX_BANKS(accel_dev);
	if (ring >= num_rings) {
		dev_err(&GET_DEV(accel_dev),
			"Device does not support more than %u ring pairs\n",
			num_rings);
		return -EINVAL;
	}

	down_write(&accel_dev->sysfs.lock);
	accel_dev->sysfs.ring_num = ring;
	up_write(&accel_dev->sysfs.lock);

	return count;
}
static DEVICE_ATTR_RW(rp2srv);

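/* Report the number of ring pairs available on the device */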
static ssize_t num_rps_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct adf_accel_dev *accel_dev;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", GET_MAX_BANKS(accel_dev));
}
static DEVICE_ATTR_RO(num_rps);

static struct attribute *qat_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_cfg_services.attr,
	&dev_attr_pm_idle_enabled.attr,
	&dev_attr_rp2srv.attr,
	&dev_attr_num_rps.attr,
	NULL,
};

static struct attribute_group qat_group = {
	.attrs = qat_attrs,
	.name = "qat",
};

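/* Create the "qat" attribute group under the device's sysfs directory */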
int adf_sysfs_init(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to create qat attribute group: %d\n", ret);
	}

	accel_dev->sysfs.ring_num = UNSET_RING_NUM;

	return ret;
}
EXPORT_SYMBOL_GPL(adf_sysfs_init);